2024-12-12 19:31:54,649 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-12 19:31:54,674 main DEBUG Took 0.022693 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-12 19:31:54,675 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-12 19:31:54,675 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-12 19:31:54,676 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-12 19:31:54,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,689 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-12 19:31:54,705 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,707 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,707 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,708 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,708 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,719 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,721 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,721 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,728 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,728 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,730 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,730 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,731 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,731 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-12 19:31:54,732 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,732 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,733 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,733 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,734 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,735 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,735 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,736 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,736 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,737 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 19:31:54,738 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,738 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-12 19:31:54,748 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 19:31:54,750 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-12 19:31:54,753 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-12 19:31:54,754 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
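
The logger definitions being built above (root at INFO writing to Console, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR, and so on) come from the log4j2.properties packaged in the hbase-logging tests jar. Purely as an illustration, the same levels could be applied programmatically with Log4j2's Configurator; the class name below is hypothetical and this is not how the test actually configures logging.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    // Illustrative sketch only: the test loads these levels from log4j2.properties,
    // not programmatically. Logger names and levels are copied from the builder calls above.
    public final class TestLoggingLevelsSketch {   // hypothetical class name
      public static void applyLevels() {
        Configurator.setRootLevel(Level.INFO);
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsSystemImpl", Level.ERROR);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
      }
    }
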
2024-12-12 19:31:54,756 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-12 19:31:54,757 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-12 19:31:54,769 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-12 19:31:54,773 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-12 19:31:54,781 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-12 19:31:54,782 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-12 19:31:54,783 main DEBUG createAppenders(={Console}) 2024-12-12 19:31:54,784 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-12 19:31:54,784 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-12 19:31:54,785 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-12 19:31:54,785 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-12 19:31:54,786 main DEBUG OutputStream closed 2024-12-12 19:31:54,786 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-12 19:31:54,787 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-12 19:31:54,787 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-12 19:31:54,903 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-12 19:31:54,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-12 19:31:54,907 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-12 19:31:54,909 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-12 19:31:54,909 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-12 19:31:54,910 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-12 19:31:54,911 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-12 19:31:54,912 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-12 19:31:54,913 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-12 19:31:54,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-12 19:31:54,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-12 19:31:54,915 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-12 19:31:54,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-12 19:31:54,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-12 19:31:54,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-12 19:31:54,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-12 19:31:54,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-12 19:31:54,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-12 19:31:54,920 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-12 19:31:54,921 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-12 19:31:54,921 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-12 19:31:54,922 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-12T19:31:55,319 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463 2024-12-12 19:31:55,322 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-12 19:31:55,323 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
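
The appender built above pairs HBase's test appender (target SYSTEM_ERR, maxSize 1G) with the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n. A minimal programmatic sketch of an equivalent layout and console appender follows; it uses the standard Log4j2 ConsoleAppender as a stand-in for HBaseTestAppender, so treat it as illustrative rather than the configuration the test actually uses.

    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.layout.PatternLayout;

    // Sketch of a console appender equivalent to the one configured above, assuming the
    // standard ConsoleAppender in place of HBaseTestAppender. Class name is hypothetical.
    public final class ConsoleAppenderSketch {
      static ConsoleAppender build() {
        PatternLayout layout = PatternLayout.newBuilder()
            .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
            .build();
        return ConsoleAppender.newBuilder()
            .setName("Console")
            .setTarget(ConsoleAppender.Target.SYSTEM_ERR)
            .setLayout(layout)
            .build();
      }
    }
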
2024-12-12T19:31:55,340 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-12T19:31:55,388 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-12T19:31:55,392 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095, deleteOnExit=true 2024-12-12T19:31:55,392 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-12T19:31:55,393 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/test.cache.data in system properties and HBase conf 2024-12-12T19:31:55,394 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T19:31:55,396 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/hadoop.log.dir in system properties and HBase conf 2024-12-12T19:31:55,396 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T19:31:55,397 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T19:31:55,397 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-12T19:31:55,505 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-12T19:31:55,624 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-12T19:31:55,629 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T19:31:55,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T19:31:55,631 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T19:31:55,632 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T19:31:55,633 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T19:31:55,633 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T19:31:55,634 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T19:31:55,635 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T19:31:55,635 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T19:31:55,636 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/nfs.dump.dir in system properties and HBase conf 2024-12-12T19:31:55,637 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/java.io.tmpdir in system properties and HBase conf 2024-12-12T19:31:55,637 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T19:31:55,638 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T19:31:55,638 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T19:31:56,847 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-12T19:31:57,016 INFO [Time-limited test {}] log.Log(170): Logging initialized @3340ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-12T19:31:57,139 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T19:31:57,269 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T19:31:57,323 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T19:31:57,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T19:31:57,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T19:31:57,360 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T19:31:57,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/hadoop.log.dir/,AVAILABLE} 2024-12-12T19:31:57,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T19:31:57,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/java.io.tmpdir/jetty-localhost-35691-hadoop-hdfs-3_4_1-tests_jar-_-any-17399525039143373640/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T19:31:57,694 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:35691} 2024-12-12T19:31:57,694 INFO [Time-limited test {}] server.Server(415): Started @4020ms 2024-12-12T19:31:58,658 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T19:31:58,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T19:31:58,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T19:31:58,705 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T19:31:58,705 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T19:31:58,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/hadoop.log.dir/,AVAILABLE} 2024-12-12T19:31:58,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T19:31:58,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/java.io.tmpdir/jetty-localhost-38343-hadoop-hdfs-3_4_1-tests_jar-_-any-3116153660493724728/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T19:31:58,872 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:38343} 2024-12-12T19:31:58,872 INFO [Time-limited test {}] server.Server(415): Started @5198ms 2024-12-12T19:31:58,991 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T19:32:00,567 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/dfs/data/data1/current/BP-554092088-172.17.0.2-1734031916410/current, will proceed with Du for space computation calculation, 2024-12-12T19:32:00,570 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/dfs/data/data2/current/BP-554092088-172.17.0.2-1734031916410/current, will proceed with Du for space computation calculation, 2024-12-12T19:32:00,713 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-12T19:32:00,807 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ff39d6e20c2bad1 with lease ID 0xb5757e79e052f45c: Processing first storage report for DS-b8d3ff5a-1892-45d1-9bcb-9f55f652613f from datanode DatanodeRegistration(127.0.0.1:35221, datanodeUuid=e4e4933b-84ad-40c1-bcb6-aee34af92fef, infoPort=40851, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=307625319;c=1734031916410) 2024-12-12T19:32:00,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ff39d6e20c2bad1 with lease ID 0xb5757e79e052f45c: from storage DS-b8d3ff5a-1892-45d1-9bcb-9f55f652613f node DatanodeRegistration(127.0.0.1:35221, datanodeUuid=e4e4933b-84ad-40c1-bcb6-aee34af92fef, infoPort=40851, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=307625319;c=1734031916410), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-12T19:32:00,817 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ff39d6e20c2bad1 with lease ID 0xb5757e79e052f45c: Processing first storage report for DS-08ff8966-515c-47f0-ac7e-2cfd9f61f876 from datanode DatanodeRegistration(127.0.0.1:35221, datanodeUuid=e4e4933b-84ad-40c1-bcb6-aee34af92fef, infoPort=40851, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=307625319;c=1734031916410) 2024-12-12T19:32:00,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ff39d6e20c2bad1 with lease ID 0xb5757e79e052f45c: from storage DS-08ff8966-515c-47f0-ac7e-2cfd9f61f876 node DatanodeRegistration(127.0.0.1:35221, datanodeUuid=e4e4933b-84ad-40c1-bcb6-aee34af92fef, infoPort=40851, infoSecurePort=0, ipcPort=43119, storageInfo=lv=-57;cid=testClusterID;nsid=307625319;c=1734031916410), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T19:32:00,857 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463 
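
Everything from the StartMiniClusterOption line onward is HBaseTestingUtility bringing up a single-master, single-regionserver, single-datanode cluster plus its embedded Jetty endpoints and datanode block reports. A minimal sketch of how a JUnit test typically drives this startup is below; the class and field names are illustrative and not copied from TestAcidGuaranteesWithAdaptivePolicy.

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    // Sketch of the usual minicluster lifecycle behind the startup logged above.
    public class MiniClusterStartupSketch {   // hypothetical test class name
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(MiniClusterStartupSketch.class);

      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors the StartMiniClusterOption printed in the log: 1 master, 1 RS, 1 DN, 1 ZK.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1).numRegionServers(1).numDataNodes(1).numZkServers(1).build();
        UTIL.startMiniCluster(option);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();
      }
    }
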
2024-12-12T19:32:01,025 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/zookeeper_0, clientPort=52216, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-12T19:32:01,053 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52216 2024-12-12T19:32:01,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:01,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:01,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741825_1001 (size=7) 2024-12-12T19:32:01,538 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 with version=8 2024-12-12T19:32:01,538 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/hbase-staging 2024-12-12T19:32:01,760 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-12T19:32:02,227 INFO [Time-limited test {}] client.ConnectionUtils(129): master/4c9c438b6eeb:0 server-side Connection retries=45 2024-12-12T19:32:02,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T19:32:02,256 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T19:32:02,256 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T19:32:02,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T19:32:02,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T19:32:02,478 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T19:32:02,564 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-12T19:32:02,575 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-12T19:32:02,581 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T19:32:02,608 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 43634 (auto-detected) 2024-12-12T19:32:02,609 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-12T19:32:02,634 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40199 2024-12-12T19:32:02,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:02,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:02,662 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40199 connecting to ZooKeeper ensemble=127.0.0.1:52216 2024-12-12T19:32:02,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:401990x0, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T19:32:02,773 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40199-0x1001bba6bd70000 connected 2024-12-12T19:32:02,923 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T19:32:02,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T19:32:02,932 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T19:32:02,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40199 2024-12-12T19:32:02,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40199 2024-12-12T19:32:02,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40199 2024-12-12T19:32:02,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40199 2024-12-12T19:32:02,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40199 
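
At this point the master has bound its NettyRpcServer to port 40199 and connected RecoverableZooKeeper to the ensemble 127.0.0.1:52216 started earlier by MiniZooKeeperCluster. A small sketch of how a test can recover that ensemble address from the running minicluster follows; variable names are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    // Illustrative sketch: reading back the ZooKeeper ensemble the minicluster started.
    public final class ZkEnsembleSketch {   // hypothetical class name
      static String ensemble(HBaseTestingUtility util) {
        Configuration conf = util.getConfiguration();
        String quorum = conf.get("hbase.zookeeper.quorum");        // e.g. localhost in a test run
        int clientPort = util.getZkCluster().getClientPort();      // e.g. 52216 in this run
        return quorum + ":" + clientPort;
      }
    }
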
2024-12-12T19:32:02,952 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98, hbase.cluster.distributed=false 2024-12-12T19:32:03,039 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/4c9c438b6eeb:0 server-side Connection retries=45 2024-12-12T19:32:03,039 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T19:32:03,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T19:32:03,040 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T19:32:03,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T19:32:03,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T19:32:03,043 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T19:32:03,047 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T19:32:03,058 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42689 2024-12-12T19:32:03,061 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T19:32:03,079 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T19:32:03,083 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:03,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:03,092 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42689 connecting to ZooKeeper ensemble=127.0.0.1:52216 2024-12-12T19:32:03,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426890x0, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T19:32:03,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426890x0, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T19:32:03,151 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42689-0x1001bba6bd70001 connected 2024-12-12T19:32:03,156 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T19:32:03,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T19:32:03,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42689 2024-12-12T19:32:03,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42689 2024-12-12T19:32:03,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42689 2024-12-12T19:32:03,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42689 2024-12-12T19:32:03,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42689 2024-12-12T19:32:03,249 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:03,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T19:32:03,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T19:32:03,278 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:03,281 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4c9c438b6eeb:40199 2024-12-12T19:32:03,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T19:32:03,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T19:32:03,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:03,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:03,331 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T19:32:03,338 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T19:32:03,339 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4c9c438b6eeb,40199,1734031921750 from backup master directory 2024-12-12T19:32:03,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T19:32:03,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:03,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T19:32:03,353 WARN [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T19:32:03,353 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:03,356 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-12T19:32:03,359 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-12T19:32:03,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741826_1002 (size=42) 2024-12-12T19:32:03,551 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/hbase.id with ID: f37b24fc-3419-4cb2-a496-bd3d837e5597 2024-12-12T19:32:03,644 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T19:32:03,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:03,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:03,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741827_1003 (size=196) 2024-12-12T19:32:03,887 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:32:03,890 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T19:32:03,921 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:03,929 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T19:32:04,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741828_1004 (size=1189) 2024-12-12T19:32:04,136 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store 2024-12-12T19:32:04,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741829_1005 (size=34) 2024-12-12T19:32:04,668 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
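
The StoreHotnessProtector message above names the switch directly: hbase.region.store.parallel.put.limit is 0 in this run, so the protector is off. A minimal sketch of turning it on follows; the property name is taken verbatim from the log line, the value 10 and the class name are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: any value > 0 enables the StoreHotnessProtector reported as disabled above.
    public final class StoreHotnessProtectorSketch {
      static Configuration enableHotProtect() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.region.store.parallel.put.limit", 10);   // 0 = disabled (as logged)
        return conf;
      }
    }
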
2024-12-12T19:32:04,669 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:04,670 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T19:32:04,670 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T19:32:04,670 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T19:32:04,671 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T19:32:04,671 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T19:32:04,671 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T19:32:04,671 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T19:32:04,690 WARN [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/.initializing 2024-12-12T19:32:04,690 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/WALs/4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:04,713 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T19:32:04,758 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4c9c438b6eeb%2C40199%2C1734031921750, suffix=, logDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/WALs/4c9c438b6eeb,40199,1734031921750, archiveDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/oldWALs, maxLogs=10 2024-12-12T19:32:04,802 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/WALs/4c9c438b6eeb,40199,1734031921750/4c9c438b6eeb%2C40199%2C1734031921750.1734031924773, exclude list is [], retry=0 2024-12-12T19:32:04,865 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35221,DS-b8d3ff5a-1892-45d1-9bcb-9f55f652613f,DISK] 2024-12-12T19:32:04,890 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
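
The WAL for the master local region is created above with blocksize=256 MB, rollsize=128 MB and maxLogs=10. The sketch below shows configuration keys commonly associated with those parameters; the exact key names are assumptions to verify against the HBase version in use, and the values simply mirror the logged run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hedged sketch: keys commonly governing the WAL parameters reported above.
    public final class WalConfigSketch {   // hypothetical class name
      static Configuration walSettings() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% of block size
        conf.setInt("hbase.regionserver.maxlogs", 10);                         // max WAL files before forced flush
        return conf;
      }
    }
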
2024-12-12T19:32:04,975 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/WALs/4c9c438b6eeb,40199,1734031921750/4c9c438b6eeb%2C40199%2C1734031921750.1734031924773 2024-12-12T19:32:04,988 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40851:40851)] 2024-12-12T19:32:04,989 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:32:04,989 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:04,996 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,008 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T19:32:05,163 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:05,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:05,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T19:32:05,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:05,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:05,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T19:32:05,226 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:05,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:05,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T19:32:05,258 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:05,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:05,279 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,288 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,321 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T19:32:05,354 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T19:32:05,403 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:32:05,407 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63022105, jitterRate=-0.060897454619407654}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T19:32:05,420 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T19:32:05,426 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T19:32:05,527 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f68ea0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:05,615 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-12T19:32:05,636 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T19:32:05,637 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T19:32:05,641 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T19:32:05,644 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 2 msec 2024-12-12T19:32:05,656 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 10 msec 2024-12-12T19:32:05,656 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T19:32:05,722 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-12T19:32:05,745 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T19:32:05,799 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-12T19:32:05,805 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T19:32:05,813 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T19:32:05,827 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-12T19:32:05,830 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T19:32:05,844 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T19:32:05,857 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-12T19:32:05,867 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T19:32:05,885 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T19:32:05,921 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T19:32:05,951 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T19:32:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T19:32:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T19:32:05,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:05,987 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=4c9c438b6eeb,40199,1734031921750, sessionid=0x1001bba6bd70000, setting cluster-up flag (Was=false) 2024-12-12T19:32:06,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:06,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:06,141 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T19:32:06,174 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:06,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:06,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:06,326 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T19:32:06,346 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:06,410 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4c9c438b6eeb:42689 2024-12-12T19:32:06,423 INFO 
[RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1008): ClusterId : f37b24fc-3419-4cb2-a496-bd3d837e5597 2024-12-12T19:32:06,429 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T19:32:06,501 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T19:32:06,502 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T19:32:06,552 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T19:32:06,552 DEBUG [RS:0;4c9c438b6eeb:42689 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26efb5b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:06,563 DEBUG [RS:0;4c9c438b6eeb:42689 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20d42c81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4c9c438b6eeb/172.17.0.2:0 2024-12-12T19:32:06,556 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-12T19:32:06,567 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T19:32:06,568 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T19:32:06,568 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-12T19:32:06,580 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-12T19:32:06,584 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-12T19:32:06,584 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(3073): reportForDuty to master=4c9c438b6eeb,40199,1734031921750 with isa=4c9c438b6eeb/172.17.0.2:42689, startcode=1734031923038 2024-12-12T19:32:06,598 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4c9c438b6eeb,40199,1734031921750 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T19:32:06,609 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4c9c438b6eeb:0, corePoolSize=5, maxPoolSize=5 2024-12-12T19:32:06,610 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4c9c438b6eeb:0, corePoolSize=5, maxPoolSize=5 2024-12-12T19:32:06,610 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4c9c438b6eeb:0, corePoolSize=5, maxPoolSize=5 2024-12-12T19:32:06,611 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4c9c438b6eeb:0, corePoolSize=5, maxPoolSize=5 2024-12-12T19:32:06,611 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4c9c438b6eeb:0, corePoolSize=10, maxPoolSize=10 2024-12-12T19:32:06,613 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:06,613 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4c9c438b6eeb:0, corePoolSize=2, maxPoolSize=2 2024-12-12T19:32:06,614 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:06,620 DEBUG [RS:0;4c9c438b6eeb:42689 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T19:32:06,657 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734031956656 2024-12-12T19:32:06,659 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T19:32:06,660 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T19:32:06,660 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-12T19:32:06,661 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T19:32:06,665 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T19:32:06,665 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T19:32:06,666 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T19:32:06,666 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T19:32:06,675 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:06,689 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T19:32:06,691 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T19:32:06,692 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T19:32:06,695 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:06,695 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T19:32:06,708 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T19:32:06,708 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T19:32:06,721 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4c9c438b6eeb:0:becomeActiveMaster-HFileCleaner.large.0-1734031926719,5,FailOnTimeoutGroup] 2024-12-12T19:32:06,750 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/4c9c438b6eeb:0:becomeActiveMaster-HFileCleaner.small.0-1734031926721,5,FailOnTimeoutGroup] 2024-12-12T19:32:06,750 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:06,751 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T19:32:06,764 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56577, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T19:32:06,767 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:06,768 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:06,778 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40199 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:06,781 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40199 {}] master.ServerManager(486): Registering regionserver=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:06,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741831_1007 (size=1039) 2024-12-12T19:32:06,822 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:32:06,828 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38311 2024-12-12T19:32:06,828 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T19:32:06,841 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-12T19:32:06,841 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:32:06,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T19:32:06,859 DEBUG [RS:0;4c9c438b6eeb:42689 {}] zookeeper.ZKUtil(111): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:06,860 WARN [RS:0;4c9c438b6eeb:42689 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T19:32:06,860 INFO [RS:0;4c9c438b6eeb:42689 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T19:32:06,861 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:06,865 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4c9c438b6eeb,42689,1734031923038] 2024-12-12T19:32:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741832_1008 (size=32) 2024-12-12T19:32:06,931 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T19:32:06,936 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:06,957 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T19:32:06,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T19:32:06,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T19:32:06,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:06,996 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T19:32:07,000 INFO [RS:0;4c9c438b6eeb:42689 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T19:32:07,001 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:07,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:07,004 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T19:32:07,006 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T19:32:07,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T19:32:07,024 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:07,026 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T19:32:07,031 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,032 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,032 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,032 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,032 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,032 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4c9c438b6eeb:0, corePoolSize=2, maxPoolSize=2 2024-12-12T19:32:07,039 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,039 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,043 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:07,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T19:32:07,044 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,045 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4c9c438b6eeb:0, corePoolSize=1, maxPoolSize=1 2024-12-12T19:32:07,045 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4c9c438b6eeb:0, corePoolSize=3, maxPoolSize=3 2024-12-12T19:32:07,046 DEBUG [RS:0;4c9c438b6eeb:42689 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0, corePoolSize=3, maxPoolSize=3 2024-12-12T19:32:07,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T19:32:07,061 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:07,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:07,072 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:07,072 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:07,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740 2024-12-12T19:32:07,075 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:07,080 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:07,080 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,42689,1734031923038-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T19:32:07,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740 2024-12-12T19:32:07,105 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:32:07,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T19:32:07,131 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T19:32:07,134 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,42689,1734031923038-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-12T19:32:07,159 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:32:07,166 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67362151, jitterRate=0.0037742704153060913}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:32:07,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T19:32:07,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T19:32:07,169 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T19:32:07,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T19:32:07,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T19:32:07,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T19:32:07,180 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T19:32:07,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T19:32:07,183 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.Replication(204): 4c9c438b6eeb,42689,1734031923038 started 2024-12-12T19:32:07,183 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1767): Serving as 4c9c438b6eeb,42689,1734031923038, RpcServer on 4c9c438b6eeb/172.17.0.2:42689, sessionid=0x1001bba6bd70001 2024-12-12T19:32:07,184 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T19:32:07,185 DEBUG [RS:0;4c9c438b6eeb:42689 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:07,185 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4c9c438b6eeb,42689,1734031923038' 2024-12-12T19:32:07,185 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T19:32:07,185 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T19:32:07,185 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-12T19:32:07,188 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T19:32:07,190 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T19:32:07,190 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T19:32:07,190 DEBUG [RS:0;4c9c438b6eeb:42689 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:07,190 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4c9c438b6eeb,42689,1734031923038' 2024-12-12T19:32:07,190 DEBUG 
[RS:0;4c9c438b6eeb:42689 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T19:32:07,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T19:32:07,200 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T19:32:07,223 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T19:32:07,227 DEBUG [RS:0;4c9c438b6eeb:42689 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T19:32:07,227 INFO [RS:0;4c9c438b6eeb:42689 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T19:32:07,227 INFO [RS:0;4c9c438b6eeb:42689 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T19:32:07,232 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T19:32:07,338 INFO [RS:0;4c9c438b6eeb:42689 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T19:32:07,362 INFO [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4c9c438b6eeb%2C42689%2C1734031923038, suffix=, logDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038, archiveDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/oldWALs, maxLogs=32 2024-12-12T19:32:07,383 WARN [4c9c438b6eeb:40199 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-12T19:32:07,406 DEBUG [RS:0;4c9c438b6eeb:42689 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038/4c9c438b6eeb%2C42689%2C1734031923038.1734031927365, exclude list is [], retry=0 2024-12-12T19:32:07,431 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35221,DS-b8d3ff5a-1892-45d1-9bcb-9f55f652613f,DISK] 2024-12-12T19:32:07,464 INFO [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038/4c9c438b6eeb%2C42689%2C1734031923038.1734031927365 2024-12-12T19:32:07,467 DEBUG [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40851:40851)] 2024-12-12T19:32:07,636 DEBUG [4c9c438b6eeb:40199 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-12T19:32:07,642 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:07,650 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4c9c438b6eeb,42689,1734031923038, state=OPENING 2024-12-12T19:32:07,693 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T19:32:07,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:07,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:07,726 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T19:32:07,726 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T19:32:07,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:07,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:07,934 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T19:32:07,938 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T19:32:07,965 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-12T19:32:07,965 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of 
type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T19:32:07,966 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T19:32:07,981 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4c9c438b6eeb%2C42689%2C1734031923038.meta, suffix=.meta, logDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038, archiveDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/oldWALs, maxLogs=32 2024-12-12T19:32:08,004 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038/4c9c438b6eeb%2C42689%2C1734031923038.meta.1734031927983.meta, exclude list is [], retry=0 2024-12-12T19:32:08,024 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35221,DS-b8d3ff5a-1892-45d1-9bcb-9f55f652613f,DISK] 2024-12-12T19:32:08,047 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038/4c9c438b6eeb%2C42689%2C1734031923038.meta.1734031927983.meta 2024-12-12T19:32:08,053 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40851:40851)] 2024-12-12T19:32:08,054 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:32:08,056 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T19:32:08,152 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T19:32:08,158 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-12T19:32:08,163 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T19:32:08,164 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:08,164 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-12T19:32:08,164 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-12T19:32:08,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T19:32:08,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T19:32:08,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:08,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:08,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T19:32:08,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T19:32:08,192 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:08,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:08,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T19:32:08,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T19:32:08,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:08,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T19:32:08,202 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740 2024-12-12T19:32:08,216 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740 2024-12-12T19:32:08,236 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-12T19:32:08,252 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T19:32:08,266 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67515368, jitterRate=0.006057381629943848}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:32:08,270 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T19:32:08,283 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734031927920 2024-12-12T19:32:08,304 DEBUG [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T19:32:08,305 INFO [RS_OPEN_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-12T19:32:08,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:08,309 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4c9c438b6eeb,42689,1734031923038, state=OPEN 2024-12-12T19:32:08,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T19:32:08,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T19:32:08,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T19:32:08,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T19:32:08,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T19:32:08,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=4c9c438b6eeb,42689,1734031923038 in 686 msec 2024-12-12T19:32:08,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T19:32:08,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2380 sec 2024-12-12T19:32:08,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 2.0070 sec 2024-12-12T19:32:08,453 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734031928453, 
completionTime=-1 2024-12-12T19:32:08,453 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-12T19:32:08,453 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-12T19:32:08,505 DEBUG [hconnection-0x77ea215e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:08,508 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:08,522 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-12T19:32:08,522 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734031988522 2024-12-12T19:32:08,522 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734032048522 2024-12-12T19:32:08,522 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 68 msec 2024-12-12T19:32:08,585 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,40199,1734031921750-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:08,586 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,40199,1734031921750-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:08,586 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,40199,1734031921750-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:08,588 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4c9c438b6eeb:40199, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:08,589 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T19:32:08,598 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
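Each "Chore ScheduledChore name=... is enabled" line above is a periodic task registered with the master's ChoreService. A minimal, self-contained sketch of that pattern, assuming the public ScheduledChore/ChoreService API; the chore name and one-second period are made up for illustration:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) {
        // Simple stopper; real chores use the master/regionserver as the Stoppable.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };

        // A toy chore; BalancerChore, CatalogJanitor, HbckChore, etc. follow the
        // same pattern with the much longer periods shown in the log.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
            @Override protected void chore() {
                System.out.println("chore fired");
            }
        };

        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);
        // ... later: service.shutdown();
    }
}
```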
2024-12-12T19:32:08,600 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T19:32:08,613 DEBUG [master/4c9c438b6eeb:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-12T19:32:08,618 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-12T19:32:08,626 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:32:08,627 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:08,630 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:32:08,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741835_1011 (size=358) 2024-12-12T19:32:08,672 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2c541955553f42ed357f6055374132eb, NAME => 'hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:32:08,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741836_1012 (size=42) 2024-12-12T19:32:09,090 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:09,091 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 2c541955553f42ed357f6055374132eb, disabling compactions & flushes 2024-12-12T19:32:09,091 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:32:09,091 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 
2024-12-12T19:32:09,091 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. after waiting 0 ms 2024-12-12T19:32:09,091 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:32:09,094 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:32:09,094 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2c541955553f42ed357f6055374132eb: 2024-12-12T19:32:09,100 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:32:09,107 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734031929101"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734031929101"}]},"ts":"1734031929101"} 2024-12-12T19:32:09,137 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T19:32:09,139 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:32:09,142 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031929140"}]},"ts":"1734031929140"} 2024-12-12T19:32:09,151 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-12T19:32:09,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2c541955553f42ed357f6055374132eb, ASSIGN}] 2024-12-12T19:32:09,181 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2c541955553f42ed357f6055374132eb, ASSIGN 2024-12-12T19:32:09,183 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=2c541955553f42ed357f6055374132eb, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:32:09,336 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2c541955553f42ed357f6055374132eb, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:09,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 2c541955553f42ed357f6055374132eb, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:09,495 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:09,503 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:32:09,504 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 2c541955553f42ed357f6055374132eb, NAME => 'hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:32:09,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:09,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,515 INFO [StoreOpener-2c541955553f42ed357f6055374132eb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,524 INFO [StoreOpener-2c541955553f42ed357f6055374132eb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c541955553f42ed357f6055374132eb columnFamilyName info 2024-12-12T19:32:09,524 DEBUG [StoreOpener-2c541955553f42ed357f6055374132eb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:09,526 INFO [StoreOpener-2c541955553f42ed357f6055374132eb-1 {}] regionserver.HStore(327): Store=2c541955553f42ed357f6055374132eb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:09,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,530 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,540 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 2c541955553f42ed357f6055374132eb 2024-12-12T19:32:09,556 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:32:09,559 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 2c541955553f42ed357f6055374132eb; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60332056, jitterRate=-0.10098230838775635}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T19:32:09,561 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 2c541955553f42ed357f6055374132eb: 2024-12-12T19:32:09,567 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb., pid=6, masterSystemTime=1734031929495 2024-12-12T19:32:09,579 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:32:09,579 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 
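The "Opened ...; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=..., jitterRate=...}" lines appear to tie together as desiredMaxFileSize ≈ configured max file size × (1 + jitterRate). Assuming this test configures hbase.hregion.max.filesize to 64 MB (an assumption; the value is not shown in this excerpt), the arithmetic reproduces the logged sizes:

```java
public class SplitJitterSketch {
    public static void main(String[] args) {
        long assumedMaxFileSize = 64L * 1024 * 1024; // 67108864 bytes; assumption, not in the log
        // hbase:namespace region above: jitterRate=-0.10098230838775635 -> ~60332056
        System.out.println((long) (assumedMaxFileSize * (1 - 0.10098230838775635)));
        // hbase:meta region earlier:    jitterRate= 0.006057381629943848 -> ~67515368
        System.out.println((long) (assumedMaxFileSize * (1 + 0.006057381629943848)));
    }
}
```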
2024-12-12T19:32:09,592 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2c541955553f42ed357f6055374132eb, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:09,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T19:32:09,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 2c541955553f42ed357f6055374132eb, server=4c9c438b6eeb,42689,1734031923038 in 263 msec 2024-12-12T19:32:09,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T19:32:09,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=2c541955553f42ed357f6055374132eb, ASSIGN in 442 msec 2024-12-12T19:32:09,634 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:32:09,634 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031929634"}]},"ts":"1734031929634"} 2024-12-12T19:32:09,639 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-12T19:32:10,046 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-12T19:32:10,047 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:32:10,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.4460 sec 2024-12-12T19:32:10,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:10,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-12T19:32:10,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:32:10,157 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-12T19:32:10,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T19:32:10,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 112 msec 2024-12-12T19:32:10,287 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-12T19:32:10,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T19:32:10,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 90 msec 2024-12-12T19:32:10,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-12T19:32:10,435 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 7.082sec 2024-12-12T19:32:10,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-12T19:32:10,437 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T19:32:10,439 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T19:32:10,440 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T19:32:10,441 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T19:32:10,444 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T19:32:10,446 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,40199,1734031921750-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T19:32:10,446 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,40199,1734031921750-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T19:32:10,492 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-12T19:32:10,493 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T19:32:10,493 INFO [master/4c9c438b6eeb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4c9c438b6eeb,40199,1734031921750-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
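The two CreateNamespaceProcedure runs above are the master bootstrapping its built-in "default" and "hbase" namespaces. A user namespace goes through the same procedure when created via the Admin API; a minimal sketch, with a made-up namespace name:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // "analytics" is a hypothetical namespace name; "default" and "hbase"
            // in the log are created by the master itself during initialization.
            admin.createNamespace(NamespaceDescriptor.create("analytics").build());
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println(ns.getName());
            }
        }
    }
}
```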
2024-12-12T19:32:10,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-12-12T19:32:10,567 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-12T19:32:10,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:10,616 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T19:32:10,616 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T19:32:10,633 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:10,646 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:10,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=4c9c438b6eeb,40199,1734031921750 2024-12-12T19:32:10,683 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=1059, ProcessCount=11, AvailableMemoryMB=9951 2024-12-12T19:32:10,700 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T19:32:10,709 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:32:10,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
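The ZKConnectionRegistry deprecation warning above points clients at the RPC-based connection registry. A sketch of what opting in might look like; the property names and the master endpoint below are assumptions recalled from the HBase client code, so verify them against the book section linked in the warning:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegistrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names (see https://hbase.apache.org/book.html#client.rpcconnectionregistry).
        conf.set("hbase.client.registry.impl",
                 "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", "master-host:16000"); // hypothetical endpoint
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
            System.out.println("connected without going through ZooKeeper for bootstrap");
        }
    }
}
```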
2024-12-12T19:32:10,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:32:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:10,729 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:32:10,790 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:10,793 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:32:10,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-12T19:32:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T19:32:10,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741837_1013 (size=963) 2024-12-12T19:32:10,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T19:32:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T19:32:11,286 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:32:11,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741838_1014 (size=53) 2024-12-12T19:32:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T19:32:11,707 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:11,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6ffb87fb734b5d4ed7499f1da86f79f5, disabling compactions & flushes 2024-12-12T19:32:11,708 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:11,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:11,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. after waiting 0 ms 2024-12-12T19:32:11,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:11,708 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
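The create request logged at 19:32:10,723 (and echoed by RegionOpenAndInit above) can be reproduced client-side with the descriptor builders. A minimal sketch of an equivalent descriptor, keeping only the attributes that differ from defaults in the logged request:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidTableDescriptorSketch {
    public static TableDescriptor build() {
        TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata from the logged create request.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)          // VERSIONS => '1'
                    .setBlocksize(64 * 1024)    // BLOCKSIZE => '65536 B (64KB)'
                    .build());
        }
        return builder.build();
    }
}
```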
2024-12-12T19:32:11,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:11,713 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:32:11,714 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734031931713"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734031931713"}]},"ts":"1734031931713"} 2024-12-12T19:32:11,720 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T19:32:11,722 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:32:11,722 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031931722"}]},"ts":"1734031931722"} 2024-12-12T19:32:11,732 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T19:32:11,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, ASSIGN}] 2024-12-12T19:32:11,780 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, ASSIGN 2024-12-12T19:32:11,811 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:32:11,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T19:32:11,964 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6ffb87fb734b5d4ed7499f1da86f79f5, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:11,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:12,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:12,163 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
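The repeated "Checking to see if procedure is done pid=9" lines are the client side of a synchronous createTable call polling the master until the CreateTableProcedure finishes (the "procId: 9 completed" line further down). A minimal sketch of that call path, reusing the hypothetical descriptor sketch above:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Blocks until the master reports the create procedure as done,
            // which is what the periodic "is procedure done" RPCs above reflect.
            admin.createTable(AcidTableDescriptorSketch.build());
            System.out.println(admin.tableExists(TableName.valueOf("TestAcidGuarantees")));
        }
    }
}
```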
2024-12-12T19:32:12,164 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:32:12,164 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,165 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:12,165 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,165 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,168 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,177 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:12,178 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ffb87fb734b5d4ed7499f1da86f79f5 columnFamilyName A 2024-12-12T19:32:12,178 DEBUG [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:12,187 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.HStore(327): Store=6ffb87fb734b5d4ed7499f1da86f79f5/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:12,188 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,191 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:12,191 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ffb87fb734b5d4ed7499f1da86f79f5 columnFamilyName B 2024-12-12T19:32:12,191 DEBUG [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:12,195 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.HStore(327): Store=6ffb87fb734b5d4ed7499f1da86f79f5/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:12,196 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,202 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:12,203 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ffb87fb734b5d4ed7499f1da86f79f5 columnFamilyName C 2024-12-12T19:32:12,203 DEBUG [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:12,208 INFO [StoreOpener-6ffb87fb734b5d4ed7499f1da86f79f5-1 {}] regionserver.HStore(327): Store=6ffb87fb734b5d4ed7499f1da86f79f5/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:12,218 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:12,221 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,222 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,226 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:32:12,231 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:12,248 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:32:12,249 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6ffb87fb734b5d4ed7499f1da86f79f5; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74421625, jitterRate=0.10896863043308258}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:32:12,251 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:12,253 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., pid=11, masterSystemTime=1734031932147 2024-12-12T19:32:12,259 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:12,259 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
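The FlushLargeStoresPolicy line above falls back to memstore flush size divided by the number of families because the table descriptor does not set a per-family lower bound. A sketch of pinning it explicitly on a descriptor; the property name is taken from the log message itself, and the 16 MB value simply mirrors the fallback the log reports:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                // Explicit per-family flush lower bound (bytes); when absent, the
                // policy derives it as memstore flush size / number of families.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
    }
}
```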
2024-12-12T19:32:12,263 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6ffb87fb734b5d4ed7499f1da86f79f5, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:12,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-12T19:32:12,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 in 280 msec 2024-12-12T19:32:12,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-12T19:32:12,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, ASSIGN in 509 msec 2024-12-12T19:32:12,286 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:32:12,287 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031932286"}]},"ts":"1734031932286"} 2024-12-12T19:32:12,296 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T19:32:12,365 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:32:12,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.6410 sec 2024-12-12T19:32:12,559 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T19:32:12,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-12T19:32:12,577 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-12T19:32:12,577 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-12T19:32:12,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T19:32:12,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-12T19:32:12,591 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-12T19:32:12,591 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T19:32:12,596 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T19:32:12,596 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T19:32:12,947 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T19:32:12,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T19:32:12,950 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-12T19:32:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T19:32:12,972 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-12T19:32:12,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-12-12T19:32:13,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,080 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,084 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,092 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T19:32:13,100 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48356, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:32:13,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-12-12T19:32:13,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,166 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-12-12T19:32:13,220 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,222 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-12-12T19:32:13,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,298 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-12-12T19:32:13,372 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,374 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-12-12T19:32:13,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,400 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-12-12T19:32:13,432 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,434 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-12-12T19:32:13,453 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,466 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:52216 with session timeout=90000ms, 
retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-12-12T19:32:13,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,495 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-12-12T19:32:13,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:13,556 DEBUG [hconnection-0x1edd6387-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,577 DEBUG [hconnection-0x74d9d707-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,580 DEBUG [hconnection-0x35a122fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,582 DEBUG [hconnection-0x5a498ecf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,585 DEBUG [hconnection-0x5ada54c4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,586 DEBUG [hconnection-0x4f6d1bf7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,587 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:13,607 DEBUG [hconnection-0x44123a6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-12T19:32:13,619 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:13,621 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:13,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:13,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:13,633 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,645 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,658 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,668 DEBUG [hconnection-0x1776b48e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,678 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,683 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,685 DEBUG [hconnection-0x50453993-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:13,688 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,697 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,721 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T19:32:13,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:13,764 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:13,795 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:13,808 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:13,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:13,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:13,814 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:13,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:13,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:13,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:13,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:13,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:13,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:13,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:13,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:13,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
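The entries above show the master storing FlushTableProcedure pid=12 for table TestAcidGuarantees and its FlushRegionProcedure child (pid=13) being rejected by the region server with "Unable to complete flush" because that region is already flushing. A minimal sketch of how a client can request such a table flush through the Admin API follows; the ZooKeeper endpoint mirrors the 127.0.0.1:52216 address seen in the log, while the class name and remaining values are illustrative assumptions, not the test's actual code.

// Minimal sketch, assuming a standalone cluster reachable at the ZooKeeper
// endpoint reported in the log (127.0.0.1:52216). Requesting a flush through
// Admin is what drives the FlushTableProcedure / FlushRegionProcedure pair above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {              // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "52216");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; the master then runs
      // a FlushTableProcedure with per-region FlushRegionProcedure children.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}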
2024-12-12T19:32:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:13,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/15ea42ec201543c2abd09b2a7b9a673b is 50, key is test_row_0/A:col10/1734031933725/Put/seqid=0 2024-12-12T19:32:14,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:14,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031994013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031994024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031994026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031994029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031994033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741839_1015 (size=14341) 2024-12-12T19:32:14,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/15ea42ec201543c2abd09b2a7b9a673b 2024-12-12T19:32:14,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031994157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031994156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031994160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031994163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031994161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,175 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:14,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:14,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ea9f18f99ffd4e4ca6cd8a982e550f3d is 50, key is test_row_0/B:col10/1734031933725/Put/seqid=0 2024-12-12T19:32:14,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:14,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741840_1016 (size=12001) 2024-12-12T19:32:14,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ea9f18f99ffd4e4ca6cd8a982e550f3d 2024-12-12T19:32:14,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:14,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:14,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031994371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
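The repeated RegionTooBusyException entries above are the region server refusing writes while the region's memstore sits over its blocking limit (reported here as 512.0 K, typically the configured flush size times the block multiplier) until the in-flight flush frees space. The HBase client normally absorbs these through its built-in retries; the sketch below is only an illustrative, hand-rolled retry around Table.put, with the row and column names borrowed from the log and everything else assumed for the example.

// Illustrative sketch only: a bounded retry for writes rejected with
// RegionTooBusyException while the memstore is over its blocking limit.
// In practice the HBase client retries such calls internally; this just makes
// the back-off explicit. Table, row, and column values are assumptions.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetrySketch {   // hypothetical class name
  static void putWithRetry(Connection connection, Put put) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                    // may surface RegionTooBusyException
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                         // give up after a few attempts
          }
          Thread.sleep(100L * attempt);      // back off while the flush completes
        }
      }
    }
  }

  public static void main(String[] args) {
    // Mirrors the kind of cell seen in the flushed HFiles: test_row_0, family A, col10.
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    // putWithRetry(connection, put) would be invoked with an open Connection.
  }
}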
2024-12-12T19:32:14,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031994374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031994374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031994381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1fc220ea582a4748872cd723c0cf5c8d is 50, key is test_row_0/C:col10/1734031933725/Put/seqid=0 2024-12-12T19:32:14,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031994387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741841_1017 (size=12001) 2024-12-12T19:32:14,550 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:14,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:14,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031994683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031994691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031994697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031994700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:14,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031994723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:14,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:14,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:14,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1fc220ea582a4748872cd723c0cf5c8d 2024-12-12T19:32:14,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/15ea42ec201543c2abd09b2a7b9a673b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b 2024-12-12T19:32:14,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b, entries=200, sequenceid=14, filesize=14.0 K 2024-12-12T19:32:14,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ea9f18f99ffd4e4ca6cd8a982e550f3d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ea9f18f99ffd4e4ca6cd8a982e550f3d 2024-12-12T19:32:14,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:14,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:14,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:14,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:14,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:14,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ea9f18f99ffd4e4ca6cd8a982e550f3d, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T19:32:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1fc220ea582a4748872cd723c0cf5c8d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1fc220ea582a4748872cd723c0cf5c8d 2024-12-12T19:32:14,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1fc220ea582a4748872cd723c0cf5c8d, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T19:32:14,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1178ms, sequenceid=14, compaction requested=false 2024-12-12T19:32:14,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:15,066 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T19:32:15,069 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:15,069 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:32:15,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:15,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/02619936cc5f4a2cb5a8f7fc8c7a4d16 is 50, key is test_row_0/A:col10/1734031934022/Put/seqid=0 2024-12-12T19:32:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741842_1018 (size=12001) 2024-12-12T19:32:15,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:15,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031995269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031995276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031995291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031995287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031995295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031995403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031995406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031995406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031995406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031995410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,583 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/02619936cc5f4a2cb5a8f7fc8c7a4d16 2024-12-12T19:32:15,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031995611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031995625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031995626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/8fcc61c7708444638d783a1353eaf488 is 50, key is test_row_0/B:col10/1734031934022/Put/seqid=0 2024-12-12T19:32:15,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031995627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031995617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741843_1019 (size=12001) 2024-12-12T19:32:15,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:15,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031995928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031995940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031995948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:15,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031995952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:15,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:32:15,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031995964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:32:16,101 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/8fcc61c7708444638d783a1353eaf488
2024-12-12T19:32:16,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/6ccdace211214f95b74d46ad9007dd93 is 50, key is test_row_0/C:col10/1734031934022/Put/seqid=0
2024-12-12T19:32:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741844_1020 (size=12001)
2024-12-12T19:32:16,196 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/6ccdace211214f95b74d46ad9007dd93
2024-12-12T19:32:16,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/02619936cc5f4a2cb5a8f7fc8c7a4d16 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/02619936cc5f4a2cb5a8f7fc8c7a4d16
2024-12-12T19:32:16,262 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/02619936cc5f4a2cb5a8f7fc8c7a4d16, entries=150, sequenceid=39, filesize=11.7 K
2024-12-12T19:32:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/8fcc61c7708444638d783a1353eaf488 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/8fcc61c7708444638d783a1353eaf488
2024-12-12T19:32:16,285 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/8fcc61c7708444638d783a1353eaf488, entries=150, sequenceid=39, filesize=11.7 K
2024-12-12T19:32:16,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/6ccdace211214f95b74d46ad9007dd93 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/6ccdace211214f95b74d46ad9007dd93
2024-12-12T19:32:16,303 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/6ccdace211214f95b74d46ad9007dd93, entries=150, sequenceid=39, filesize=11.7 K
2024-12-12T19:32:16,306 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1237ms, sequenceid=39, compaction requested=false
2024-12-12T19:32:16,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5:
2024-12-12T19:32:16,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.
2024-12-12T19:32:16,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13
2024-12-12T19:32:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=13
2024-12-12T19:32:16,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-12-12T19:32:16,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6930 sec
2024-12-12T19:32:16,332 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.7200 sec
2024-12-12T19:32:16,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-12T19:32:16,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A
2024-12-12T19:32:16,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:32:16,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B
2024-12-12T19:32:16,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:32:16,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C
2024-12-12T19:32:16,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:32:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5
2024-12-12T19:32:16,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/e3992581ed774eaca0c901c4ea4321f4 is 50, key is test_row_0/A:col10/1734031935277/Put/seqid=0
2024-12-12T19:32:16,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741845_1021 (size=14341)
2024-12-12T19:32:16,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031996590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031996594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031996595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031996608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031996614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031996725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031996727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031996727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031996729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031996738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/e3992581ed774eaca0c901c4ea4321f4 2024-12-12T19:32:16,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031996951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031996951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031996957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031996960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:16,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031996965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:16,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/c533a685d12f4830b0348bc0ebf73a47 is 50, key is test_row_0/B:col10/1734031935277/Put/seqid=0 2024-12-12T19:32:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741846_1022 (size=12001) 2024-12-12T19:32:17,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031997272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031997276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031997285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031997285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031997286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/c533a685d12f4830b0348bc0ebf73a47 2024-12-12T19:32:17,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/e9b48ea1642e4f33a1d06cb8e7650176 is 50, key is test_row_0/C:col10/1734031935277/Put/seqid=0 2024-12-12T19:32:17,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741847_1023 (size=12001) 2024-12-12T19:32:17,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/e9b48ea1642e4f33a1d06cb8e7650176 2024-12-12T19:32:17,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/e3992581ed774eaca0c901c4ea4321f4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/e3992581ed774eaca0c901c4ea4321f4 2024-12-12T19:32:17,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/e3992581ed774eaca0c901c4ea4321f4, entries=200, sequenceid=52, filesize=14.0 K 2024-12-12T19:32:17,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/c533a685d12f4830b0348bc0ebf73a47 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c533a685d12f4830b0348bc0ebf73a47 2024-12-12T19:32:17,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c533a685d12f4830b0348bc0ebf73a47, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:32:17,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/e9b48ea1642e4f33a1d06cb8e7650176 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9b48ea1642e4f33a1d06cb8e7650176 2024-12-12T19:32:17,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9b48ea1642e4f33a1d06cb8e7650176, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:32:17,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1220ms, sequenceid=52, compaction requested=true 2024-12-12T19:32:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 
2024-12-12T19:32:17,678 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:17,678 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:17,683 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:17,685 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is initiating minor compaction (all files) 2024-12-12T19:32:17,685 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:17,686 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ea9f18f99ffd4e4ca6cd8a982e550f3d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/8fcc61c7708444638d783a1353eaf488, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c533a685d12f4830b0348bc0ebf73a47] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.2 K 2024-12-12T19:32:17,686 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:17,686 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:17,687 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ea9f18f99ffd4e4ca6cd8a982e550f3d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734031933725 2024-12-12T19:32:17,688 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:17,689 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fcc61c7708444638d783a1353eaf488, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734031934012 2024-12-12T19:32:17,689 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/02619936cc5f4a2cb5a8f7fc8c7a4d16, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/e3992581ed774eaca0c901c4ea4321f4] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=39.7 K 2024-12-12T19:32:17,690 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c533a685d12f4830b0348bc0ebf73a47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734031935267 2024-12-12T19:32:17,698 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15ea42ec201543c2abd09b2a7b9a673b, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734031933715 2024-12-12T19:32:17,702 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02619936cc5f4a2cb5a8f7fc8c7a4d16, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734031934012 2024-12-12T19:32:17,703 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3992581ed774eaca0c901c4ea4321f4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734031935264 2024-12-12T19:32:17,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T19:32:17,764 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-12T19:32:17,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:17,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-12T19:32:17,771 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:17,773 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:17,773 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T19:32:17,797 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#10 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:17,799 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/7adbeb30faf443cc8edca842b91ae976 is 50, key is test_row_0/A:col10/1734031935277/Put/seqid=0 2024-12-12T19:32:17,805 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#9 average throughput is 0.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:17,806 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ce97df282b6e4693a10870c3a85a46b9 is 50, key is test_row_0/B:col10/1734031935277/Put/seqid=0 2024-12-12T19:32:17,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:32:17,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:17,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:17,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:17,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:17,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:17,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:17,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741848_1024 (size=12104) 2024-12-12T19:32:17,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b504c6cef4234ffaab46b80ed917bd1b is 50, key is test_row_0/A:col10/1734031936531/Put/seqid=0 2024-12-12T19:32:17,877 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T19:32:17,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741849_1025 (size=12104) 2024-12-12T19:32:17,885 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/7adbeb30faf443cc8edca842b91ae976 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/7adbeb30faf443cc8edca842b91ae976 2024-12-12T19:32:17,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741850_1026 (size=14341) 2024-12-12T19:32:17,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031997841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031997867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031997881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031997886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:17,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031997889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,909 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ce97df282b6e4693a10870c3a85a46b9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce97df282b6e4693a10870c3a85a46b9 2024-12-12T19:32:17,917 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into 7adbeb30faf443cc8edca842b91ae976(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:17,917 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:17,917 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=13, startTime=1734031937675; duration=0sec 2024-12-12T19:32:17,917 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:17,918 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:17,918 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:17,926 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:17,926 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:17,927 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:17,926 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:17,928 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1fc220ea582a4748872cd723c0cf5c8d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/6ccdace211214f95b74d46ad9007dd93, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9b48ea1642e4f33a1d06cb8e7650176] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.2 K 2024-12-12T19:32:17,929 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into ce97df282b6e4693a10870c3a85a46b9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:17,929 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:17,929 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=13, startTime=1734031937677; duration=0sec 2024-12-12T19:32:17,930 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:17,930 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:17,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T19:32:17,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:17,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:17,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:17,936 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fc220ea582a4748872cd723c0cf5c8d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734031933725 2024-12-12T19:32:17,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:17,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:17,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b504c6cef4234ffaab46b80ed917bd1b 2024-12-12T19:32:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:17,938 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ccdace211214f95b74d46ad9007dd93, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734031934012 2024-12-12T19:32:17,941 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9b48ea1642e4f33a1d06cb8e7650176, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734031935267 2024-12-12T19:32:17,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/6a150395dacb43298ed8533f0399c665 is 50, key is test_row_0/B:col10/1734031936531/Put/seqid=0 2024-12-12T19:32:18,004 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#13 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:18,005 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/e9ac9572c8ef4b9e8bd77f0169e463fb is 50, key is test_row_0/C:col10/1734031935277/Put/seqid=0 2024-12-12T19:32:18,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031998000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031998007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031998009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031998013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031998014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741852_1028 (size=12104) 2024-12-12T19:32:18,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741851_1027 (size=12001) 2024-12-12T19:32:18,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T19:32:18,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/6a150395dacb43298ed8533f0399c665 2024-12-12T19:32:18,089 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T19:32:18,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:18,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
as already flushing 2024-12-12T19:32:18,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:18,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:18,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:18,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:18,108 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/e9ac9572c8ef4b9e8bd77f0169e463fb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9ac9572c8ef4b9e8bd77f0169e463fb 2024-12-12T19:32:18,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/404cf232a05240faa5bf98388ca9750e is 50, key is test_row_0/C:col10/1734031936531/Put/seqid=0 2024-12-12T19:32:18,124 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into e9ac9572c8ef4b9e8bd77f0169e463fb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:18,125 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:18,125 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=13, startTime=1734031937677; duration=0sec 2024-12-12T19:32:18,125 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:18,125 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:18,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741853_1029 (size=12001) 2024-12-12T19:32:18,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/404cf232a05240faa5bf98388ca9750e 2024-12-12T19:32:18,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031998224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b504c6cef4234ffaab46b80ed917bd1b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b504c6cef4234ffaab46b80ed917bd1b 2024-12-12T19:32:18,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031998226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031998237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031998238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,252 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T19:32:18,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:18,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:18,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:18,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:18,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:18,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b504c6cef4234ffaab46b80ed917bd1b, entries=200, sequenceid=76, filesize=14.0 K 2024-12-12T19:32:18,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031998239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/6a150395dacb43298ed8533f0399c665 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/6a150395dacb43298ed8533f0399c665 2024-12-12T19:32:18,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:18,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/6a150395dacb43298ed8533f0399c665, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T19:32:18,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/404cf232a05240faa5bf98388ca9750e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/404cf232a05240faa5bf98388ca9750e 2024-12-12T19:32:18,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/404cf232a05240faa5bf98388ca9750e, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T19:32:18,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 513ms, sequenceid=76, compaction requested=false 2024-12-12T19:32:18,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:18,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T19:32:18,420 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T19:32:18,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:18,421 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:18,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:18,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:18,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:18,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:18,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:18,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:18,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/adfbbf51ea30449ab58677ceb156ebf9 is 50, key is test_row_0/A:col10/1734031937882/Put/seqid=0 2024-12-12T19:32:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741854_1030 (size=12001) 2024-12-12T19:32:18,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:18,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031998654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031998655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031998658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031998658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031998665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031998765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031998774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031998774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031998774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031998776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T19:32:18,906 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/adfbbf51ea30449ab58677ceb156ebf9 2024-12-12T19:32:18,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/947a2caf0f95463bb9a4bb4a2f42d01c is 50, key is test_row_0/B:col10/1734031937882/Put/seqid=0 2024-12-12T19:32:18,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031998978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031998987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031998997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031999000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031999004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741855_1031 (size=12001) 2024-12-12T19:32:19,027 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/947a2caf0f95463bb9a4bb4a2f42d01c 2024-12-12T19:32:19,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/8e308e420f1b4da28cffc877bc6ef5f9 is 50, key is test_row_0/C:col10/1734031937882/Put/seqid=0 2024-12-12T19:32:19,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741856_1032 (size=12001) 2024-12-12T19:32:19,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031999286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031999309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031999312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031999316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031999327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,520 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/8e308e420f1b4da28cffc877bc6ef5f9 2024-12-12T19:32:19,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/adfbbf51ea30449ab58677ceb156ebf9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/adfbbf51ea30449ab58677ceb156ebf9 2024-12-12T19:32:19,575 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/adfbbf51ea30449ab58677ceb156ebf9, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:32:19,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/947a2caf0f95463bb9a4bb4a2f42d01c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/947a2caf0f95463bb9a4bb4a2f42d01c 2024-12-12T19:32:19,604 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/947a2caf0f95463bb9a4bb4a2f42d01c, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:32:19,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/8e308e420f1b4da28cffc877bc6ef5f9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8e308e420f1b4da28cffc877bc6ef5f9 2024-12-12T19:32:19,636 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8e308e420f1b4da28cffc877bc6ef5f9, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:32:19,641 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1219ms, sequenceid=91, compaction requested=true 2024-12-12T19:32:19,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:19,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:19,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-12T19:32:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-12T19:32:19,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-12T19:32:19,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8720 sec 2024-12-12T19:32:19,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.8850 sec 2024-12-12T19:32:19,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:19,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:32:19,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:19,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:19,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:19,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:19,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:19,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:19,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/4a6435fc814841c8baf266be2f803a22 is 50, key is test_row_0/A:col10/1734031939805/Put/seqid=0 2024-12-12T19:32:19,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741857_1033 (size=14341) 2024-12-12T19:32:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T19:32:19,890 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-12T19:32:19,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031999873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031999870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734031999888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734031999893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:19,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734031999901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:19,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-12T19:32:19,911 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T19:32:19,915 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:19,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:20,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734031999998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032000002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T19:32:20,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734031999993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032000005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032000010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:20,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:20,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T19:32:20,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032000217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032000218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032000220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032000220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,231 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:20,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:20,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032000240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:20,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:20,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:20,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/4a6435fc814841c8baf266be2f803a22 2024-12-12T19:32:20,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/153aff2a404445fbb9f925d6b88ce585 is 50, key is test_row_0/B:col10/1734031939805/Put/seqid=0 2024-12-12T19:32:20,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741858_1034 (size=12001) 2024-12-12T19:32:20,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:20,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:20,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:20,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/153aff2a404445fbb9f925d6b88ce585 2024-12-12T19:32:20,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/59c8ea0fae684376b2bb176542b27aea is 50, key is test_row_0/C:col10/1734031939805/Put/seqid=0 2024-12-12T19:32:20,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T19:32:20,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032000534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032000536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032000537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032000538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:20,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032000561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,591 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:20,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:20,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:20,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741859_1035 (size=12001) 2024-12-12T19:32:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:20,957 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:20,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:20,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:20,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:21,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/59c8ea0fae684376b2bb176542b27aea 2024-12-12T19:32:21,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T19:32:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:21,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032001048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:21,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032001052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:21,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032001068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:21,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:21,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032001076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032001073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/4a6435fc814841c8baf266be2f803a22 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/4a6435fc814841c8baf266be2f803a22 2024-12-12T19:32:21,127 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:21,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:21,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
as already flushing 2024-12-12T19:32:21,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:21,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:21,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:21,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:21,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/4a6435fc814841c8baf266be2f803a22, entries=200, sequenceid=117, filesize=14.0 K 2024-12-12T19:32:21,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/153aff2a404445fbb9f925d6b88ce585 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/153aff2a404445fbb9f925d6b88ce585 2024-12-12T19:32:21,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/153aff2a404445fbb9f925d6b88ce585, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T19:32:21,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/59c8ea0fae684376b2bb176542b27aea as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/59c8ea0fae684376b2bb176542b27aea 2024-12-12T19:32:21,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,292 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:21,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:21,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:21,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:21,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:21,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:21,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/59c8ea0fae684376b2bb176542b27aea, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T19:32:21,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1521ms, sequenceid=117, compaction requested=true 2024-12-12T19:32:21,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:21,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:21,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:21,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:21,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:32:21,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:21,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:32:21,341 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:21,341 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:21,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:21,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T19:32:21,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:21,457 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:21,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:21,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:21,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:21,458 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:21,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:21,458 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:21,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:21,458 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:21,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:21,458 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/7adbeb30faf443cc8edca842b91ae976, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b504c6cef4234ffaab46b80ed917bd1b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/adfbbf51ea30449ab58677ceb156ebf9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/4a6435fc814841c8baf266be2f803a22] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=51.5 K 2024-12-12T19:32:21,459 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:21,459 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:21,459 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:21,459 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9ac9572c8ef4b9e8bd77f0169e463fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/404cf232a05240faa5bf98388ca9750e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8e308e420f1b4da28cffc877bc6ef5f9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/59c8ea0fae684376b2bb176542b27aea] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=47.0 K 2024-12-12T19:32:21,461 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e9ac9572c8ef4b9e8bd77f0169e463fb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734031935267 2024-12-12T19:32:21,462 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7adbeb30faf443cc8edca842b91ae976, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734031935267 2024-12-12T19:32:21,463 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b504c6cef4234ffaab46b80ed917bd1b, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734031936531 2024-12-12T19:32:21,464 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 404cf232a05240faa5bf98388ca9750e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734031936531 2024-12-12T19:32:21,466 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e308e420f1b4da28cffc877bc6ef5f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031937828 2024-12-12T19:32:21,472 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting adfbbf51ea30449ab58677ceb156ebf9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031937828 2024-12-12T19:32:21,473 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 59c8ea0fae684376b2bb176542b27aea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734031938655 2024-12-12T19:32:21,474 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a6435fc814841c8baf266be2f803a22, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734031938652 2024-12-12T19:32:21,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/614cdba6a5ce47388160d0b7bba4b2f1 is 50, key is 
test_row_0/A:col10/1734031939878/Put/seqid=0 2024-12-12T19:32:21,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741860_1036 (size=12001) 2024-12-12T19:32:21,544 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/614cdba6a5ce47388160d0b7bba4b2f1 2024-12-12T19:32:21,546 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:21,547 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7b13b264606a470aa0786eb1f4799c18 is 50, key is test_row_0/C:col10/1734031939805/Put/seqid=0 2024-12-12T19:32:21,553 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#23 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:21,554 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b5b694b359b44ef6b556eec99f93b78b is 50, key is test_row_0/A:col10/1734031939805/Put/seqid=0 2024-12-12T19:32:21,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741861_1037 (size=12241) 2024-12-12T19:32:21,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/57b2864237e24afe8ca39a76ff719c11 is 50, key is test_row_0/B:col10/1734031939878/Put/seqid=0 2024-12-12T19:32:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741862_1038 (size=12241) 2024-12-12T19:32:21,639 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7b13b264606a470aa0786eb1f4799c18 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7b13b264606a470aa0786eb1f4799c18 2024-12-12T19:32:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741863_1039 (size=12001) 2024-12-12T19:32:21,679 DEBUG 
[RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b5b694b359b44ef6b556eec99f93b78b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b5b694b359b44ef6b556eec99f93b78b 2024-12-12T19:32:21,687 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/57b2864237e24afe8ca39a76ff719c11 2024-12-12T19:32:21,688 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into 7b13b264606a470aa0786eb1f4799c18(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:21,688 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:21,688 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=12, startTime=1734031941341; duration=0sec 2024-12-12T19:32:21,689 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:21,689 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:21,689 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:21,709 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:21,710 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is initiating minor compaction (all files) 2024-12-12T19:32:21,710 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:21,710 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce97df282b6e4693a10870c3a85a46b9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/6a150395dacb43298ed8533f0399c665, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/947a2caf0f95463bb9a4bb4a2f42d01c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/153aff2a404445fbb9f925d6b88ce585] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=47.0 K 2024-12-12T19:32:21,720 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ce97df282b6e4693a10870c3a85a46b9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734031935267 2024-12-12T19:32:21,724 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into b5b694b359b44ef6b556eec99f93b78b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:21,724 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:21,724 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=12, startTime=1734031941340; duration=0sec 2024-12-12T19:32:21,724 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:21,724 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:21,725 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a150395dacb43298ed8533f0399c665, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734031936531 2024-12-12T19:32:21,732 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 947a2caf0f95463bb9a4bb4a2f42d01c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031937828 2024-12-12T19:32:21,738 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 153aff2a404445fbb9f925d6b88ce585, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734031938655 2024-12-12T19:32:21,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/ca238867812343259a80d1c93d6d4901 is 50, key is test_row_0/C:col10/1734031939878/Put/seqid=0 2024-12-12T19:32:21,812 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#26 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:21,813 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/b9e95e0ebfb44eea8de4a4ed316fcb1c is 50, key is test_row_0/B:col10/1734031939805/Put/seqid=0 2024-12-12T19:32:21,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741864_1040 (size=12001) 2024-12-12T19:32:21,819 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/ca238867812343259a80d1c93d6d4901 2024-12-12T19:32:21,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/614cdba6a5ce47388160d0b7bba4b2f1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/614cdba6a5ce47388160d0b7bba4b2f1 2024-12-12T19:32:21,886 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/614cdba6a5ce47388160d0b7bba4b2f1, entries=150, sequenceid=129, filesize=11.7 K 2024-12-12T19:32:21,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741865_1041 (size=12241) 2024-12-12T19:32:21,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/57b2864237e24afe8ca39a76ff719c11 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/57b2864237e24afe8ca39a76ff719c11 2024-12-12T19:32:21,922 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/b9e95e0ebfb44eea8de4a4ed316fcb1c as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b9e95e0ebfb44eea8de4a4ed316fcb1c 2024-12-12T19:32:21,936 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/57b2864237e24afe8ca39a76ff719c11, entries=150, sequenceid=129, filesize=11.7 K 2024-12-12T19:32:21,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/ca238867812343259a80d1c93d6d4901 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/ca238867812343259a80d1c93d6d4901 2024-12-12T19:32:22,002 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into b9e95e0ebfb44eea8de4a4ed316fcb1c(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:22,002 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:22,002 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=12, startTime=1734031941340; duration=0sec 2024-12-12T19:32:22,002 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:22,002 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:22,005 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/ca238867812343259a80d1c93d6d4901, entries=150, sequenceid=129, filesize=11.7 K 2024-12-12T19:32:22,013 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 551ms, sequenceid=129, compaction requested=false 2024-12-12T19:32:22,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:22,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-12T19:32:22,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-12T19:32:22,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-12T19:32:22,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1010 sec 2024-12-12T19:32:22,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.1190 sec 2024-12-12T19:32:22,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T19:32:22,026 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-12T19:32:22,037 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:22,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-12T19:32:22,042 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:22,043 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:22,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:22,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:22,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:32:22,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:22,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:22,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:22,158 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:22,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:22,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:22,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:22,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/de9f7997de974c15924bc84b5fc390e3 is 50, key is test_row_0/A:col10/1734031942112/Put/seqid=0 2024-12-12T19:32:22,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741866_1042 (size=12151) 2024-12-12T19:32:22,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032002298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032002319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032002322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032002323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032002326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:22,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:22,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:22,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:22,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032002443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032002448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032002449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032002452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032002484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,518 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:22,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:22,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:22,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/de9f7997de974c15924bc84b5fc390e3 2024-12-12T19:32:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:22,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032002661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032002664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,679 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:22,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032002674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:22,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:22,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/824fc214d8124c0a85df657c3adb1fb0 is 50, key is test_row_0/B:col10/1734031942112/Put/seqid=0 2024-12-12T19:32:22,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032002677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032002690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741867_1043 (size=12151) 2024-12-12T19:32:22,835 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:22,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:22,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:22,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032002973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:22,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:22,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032002988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:22,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:22,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:22,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032002988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032002999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032003006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/824fc214d8124c0a85df657c3adb1fb0 2024-12-12T19:32:23,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:23,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:23,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:23,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:23,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:23,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/38b5c08af0974447b8754ca5f1bc0b9c is 50, key is test_row_0/C:col10/1734031942112/Put/seqid=0 2024-12-12T19:32:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741868_1044 (size=12151) 2024-12-12T19:32:23,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/38b5c08af0974447b8754ca5f1bc0b9c 2024-12-12T19:32:23,312 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:23,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:23,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:23,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:23,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/de9f7997de974c15924bc84b5fc390e3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/de9f7997de974c15924bc84b5fc390e3 2024-12-12T19:32:23,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:23,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/de9f7997de974c15924bc84b5fc390e3, entries=150, sequenceid=145, filesize=11.9 K 2024-12-12T19:32:23,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/824fc214d8124c0a85df657c3adb1fb0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/824fc214d8124c0a85df657c3adb1fb0 2024-12-12T19:32:23,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/824fc214d8124c0a85df657c3adb1fb0, entries=150, sequenceid=145, filesize=11.9 K 2024-12-12T19:32:23,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/38b5c08af0974447b8754ca5f1bc0b9c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/38b5c08af0974447b8754ca5f1bc0b9c 2024-12-12T19:32:23,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/38b5c08af0974447b8754ca5f1bc0b9c, entries=150, sequenceid=145, filesize=11.9 K 
2024-12-12T19:32:23,473 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-12T19:32:23,473 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(992): StoreScanner already has the close lock. There is no need to updateReaders 2024-12-12T19:32:23,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1331ms, sequenceid=145, compaction requested=true 2024-12-12T19:32:23,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:23,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:23,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:23,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:23,476 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:23,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:23,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:23,477 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,476 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:23,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T19:32:23,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:23,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:23,483 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T19:32:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:23,484 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:23,484 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is initiating minor compaction (all files) 2024-12-12T19:32:23,485 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:23,485 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b9e95e0ebfb44eea8de4a4ed316fcb1c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/57b2864237e24afe8ca39a76ff719c11, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/824fc214d8124c0a85df657c3adb1fb0] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.5 K 2024-12-12T19:32:23,486 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:23,486 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:23,486 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:23,486 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b5b694b359b44ef6b556eec99f93b78b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/614cdba6a5ce47388160d0b7bba4b2f1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/de9f7997de974c15924bc84b5fc390e3] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.5 K 2024-12-12T19:32:23,488 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9e95e0ebfb44eea8de4a4ed316fcb1c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734031938655 2024-12-12T19:32:23,488 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b5b694b359b44ef6b556eec99f93b78b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734031938655 2024-12-12T19:32:23,489 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 614cdba6a5ce47388160d0b7bba4b2f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734031939878 2024-12-12T19:32:23,489 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57b2864237e24afe8ca39a76ff719c11, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734031939878 2024-12-12T19:32:23,492 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 824fc214d8124c0a85df657c3adb1fb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734031942112 2024-12-12T19:32:23,492 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting de9f7997de974c15924bc84b5fc390e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734031942112 2024-12-12T19:32:23,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:23,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:23,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/43a3cb6f073c4b528629a4829f81c910 is 50, key is test_row_0/A:col10/1734031942323/Put/seqid=0 2024-12-12T19:32:23,556 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:23,556 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#32 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:23,557 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/cbda0861236945dd8a598a98714afc3f is 50, key is test_row_0/A:col10/1734031942112/Put/seqid=0 2024-12-12T19:32:23,557 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/a5faadbe07ae40339f4dcc65ab01c990 is 50, key is test_row_0/B:col10/1734031942112/Put/seqid=0 2024-12-12T19:32:23,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032003543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032003546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032003549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032003559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032003564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741869_1045 (size=12151) 2024-12-12T19:32:23,594 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/43a3cb6f073c4b528629a4829f81c910 2024-12-12T19:32:23,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741870_1046 (size=12493) 2024-12-12T19:32:23,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741871_1047 (size=12493) 2024-12-12T19:32:23,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/474402912cb34d2e8b52d088ccb00fdd is 50, key is test_row_0/B:col10/1734031942323/Put/seqid=0 2024-12-12T19:32:23,662 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/a5faadbe07ae40339f4dcc65ab01c990 as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/a5faadbe07ae40339f4dcc65ab01c990 2024-12-12T19:32:23,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032003668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,684 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into a5faadbe07ae40339f4dcc65ab01c990(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:23,684 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:23,684 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=13, startTime=1734031943476; duration=0sec 2024-12-12T19:32:23,684 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:23,684 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:23,684 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:23,691 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:23,691 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:23,691 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:23,691 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7b13b264606a470aa0786eb1f4799c18, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/ca238867812343259a80d1c93d6d4901, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/38b5c08af0974447b8754ca5f1bc0b9c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.5 K 2024-12-12T19:32:23,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032003672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032003673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,700 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b13b264606a470aa0786eb1f4799c18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734031938655 2024-12-12T19:32:23,702 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca238867812343259a80d1c93d6d4901, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734031939878 2024-12-12T19:32:23,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,705 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38b5c08af0974447b8754ca5f1bc0b9c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734031942112 2024-12-12T19:32:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032003683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032003703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741872_1048 (size=12151) 2024-12-12T19:32:23,728 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/474402912cb34d2e8b52d088ccb00fdd 2024-12-12T19:32:23,786 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:23,787 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7268bcda2e8b416f89f9477b83cf336c is 50, key is test_row_0/C:col10/1734031942112/Put/seqid=0 2024-12-12T19:32:23,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1627466b70b94010b9a904725922995d is 50, key is test_row_0/C:col10/1734031942323/Put/seqid=0 2024-12-12T19:32:23,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741873_1049 (size=12493) 2024-12-12T19:32:23,884 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7268bcda2e8b416f89f9477b83cf336c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7268bcda2e8b416f89f9477b83cf336c 2024-12-12T19:32:23,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741874_1050 (size=12151) 2024-12-12T19:32:23,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032003888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032003897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032003906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,919 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into 7268bcda2e8b416f89f9477b83cf336c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:23,920 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:23,920 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=13, startTime=1734031943476; duration=0sec 2024-12-12T19:32:23,920 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:23,920 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:23,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032003916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:23,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032003917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,116 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/cbda0861236945dd8a598a98714afc3f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/cbda0861236945dd8a598a98714afc3f 2024-12-12T19:32:24,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:24,168 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into cbda0861236945dd8a598a98714afc3f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:24,168 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:24,168 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=13, startTime=1734031943475; duration=0sec 2024-12-12T19:32:24,169 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:24,169 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:24,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032004199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032004213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032004229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032004229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032004229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,294 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1627466b70b94010b9a904725922995d 2024-12-12T19:32:24,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/43a3cb6f073c4b528629a4829f81c910 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/43a3cb6f073c4b528629a4829f81c910 2024-12-12T19:32:24,344 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/43a3cb6f073c4b528629a4829f81c910, entries=150, sequenceid=168, filesize=11.9 K 2024-12-12T19:32:24,348 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/474402912cb34d2e8b52d088ccb00fdd as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/474402912cb34d2e8b52d088ccb00fdd 2024-12-12T19:32:24,394 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/474402912cb34d2e8b52d088ccb00fdd, entries=150, sequenceid=168, filesize=11.9 K 2024-12-12T19:32:24,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1627466b70b94010b9a904725922995d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1627466b70b94010b9a904725922995d 2024-12-12T19:32:24,435 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1627466b70b94010b9a904725922995d, entries=150, sequenceid=168, filesize=11.9 K 2024-12-12T19:32:24,438 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 955ms, sequenceid=168, compaction requested=false 2024-12-12T19:32:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-12T19:32:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-12T19:32:24,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-12T19:32:24,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3960 sec 2024-12-12T19:32:24,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.4160 sec 2024-12-12T19:32:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:24,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:32:24,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:24,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:24,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:24,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:24,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:24,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:24,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/54b81f64b55941b386b514d7eea196fa is 50, key is test_row_0/A:col10/1734031944727/Put/seqid=0 2024-12-12T19:32:24,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741875_1051 (size=14541) 2024-12-12T19:32:24,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032004940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032004945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032004952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032004960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:24,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032004956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032005074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032005076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032005077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032005077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032005078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/54b81f64b55941b386b514d7eea196fa 2024-12-12T19:32:25,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032005285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032005285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/7be4eb96e9bd406f8aaf4fbd4e398d1d is 50, key is test_row_0/B:col10/1734031944727/Put/seqid=0 2024-12-12T19:32:25,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032005286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032005296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032005296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741876_1052 (size=12151) 2024-12-12T19:32:25,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/7be4eb96e9bd406f8aaf4fbd4e398d1d 2024-12-12T19:32:25,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/fed750e3cf694848b40df127bacd7ae3 is 50, key is test_row_0/C:col10/1734031944727/Put/seqid=0 2024-12-12T19:32:25,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741877_1053 (size=12151) 2024-12-12T19:32:25,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032005601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032005601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032005604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032005612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:25,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032005616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:25,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/fed750e3cf694848b40df127bacd7ae3 2024-12-12T19:32:25,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/54b81f64b55941b386b514d7eea196fa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/54b81f64b55941b386b514d7eea196fa 2024-12-12T19:32:25,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/54b81f64b55941b386b514d7eea196fa, entries=200, sequenceid=186, filesize=14.2 K 2024-12-12T19:32:25,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/7be4eb96e9bd406f8aaf4fbd4e398d1d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/7be4eb96e9bd406f8aaf4fbd4e398d1d 2024-12-12T19:32:26,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/7be4eb96e9bd406f8aaf4fbd4e398d1d, entries=150, sequenceid=186, filesize=11.9 K 2024-12-12T19:32:26,023 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-12T19:32:26,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/fed750e3cf694848b40df127bacd7ae3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/fed750e3cf694848b40df127bacd7ae3 2024-12-12T19:32:26,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/fed750e3cf694848b40df127bacd7ae3, entries=150, sequenceid=186, filesize=11.9 K 2024-12-12T19:32:26,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1339ms, sequenceid=186, compaction requested=true 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:26,081 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:26,081 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:26,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:26,084 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 
2024-12-12T19:32:26,084 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:26,085 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,085 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/cbda0861236945dd8a598a98714afc3f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/43a3cb6f073c4b528629a4829f81c910, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/54b81f64b55941b386b514d7eea196fa] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=38.3 K 2024-12-12T19:32:26,086 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting cbda0861236945dd8a598a98714afc3f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734031942112 2024-12-12T19:32:26,087 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 43a3cb6f073c4b528629a4829f81c910, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734031942300 2024-12-12T19:32:26,088 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:26,089 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is initiating minor compaction (all files) 2024-12-12T19:32:26,089 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:26,089 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/a5faadbe07ae40339f4dcc65ab01c990, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/474402912cb34d2e8b52d088ccb00fdd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/7be4eb96e9bd406f8aaf4fbd4e398d1d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.9 K 2024-12-12T19:32:26,090 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 54b81f64b55941b386b514d7eea196fa, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1734031943544 2024-12-12T19:32:26,090 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5faadbe07ae40339f4dcc65ab01c990, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734031942112 2024-12-12T19:32:26,092 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 474402912cb34d2e8b52d088ccb00fdd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734031942300 2024-12-12T19:32:26,094 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7be4eb96e9bd406f8aaf4fbd4e398d1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1734031943544 2024-12-12T19:32:26,113 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:26,114 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/d7aef9e0c24744908134d68bd484696f is 50, key is test_row_0/A:col10/1734031944727/Put/seqid=0 2024-12-12T19:32:26,121 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:26,122 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ba2c5e908cdd42058683e6ca34df00dc is 50, key is test_row_0/B:col10/1734031944727/Put/seqid=0 2024-12-12T19:32:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:26,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T19:32:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T19:32:26,165 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-12T19:32:26,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-12T19:32:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T19:32:26,174 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:26,176 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:26,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:26,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741879_1055 (size=12595) 
2024-12-12T19:32:26,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032006182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032006195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032006200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032006204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032006205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/da14247a4cf04d01b461cf9e545fe2b6 is 50, key is test_row_0/A:col10/1734031944929/Put/seqid=0 2024-12-12T19:32:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741878_1054 (size=12595) 2024-12-12T19:32:26,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741880_1056 (size=12151) 2024-12-12T19:32:26,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T19:32:26,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032006306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032006309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032006308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032006309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032006316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,334 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T19:32:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T19:32:26,500 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T19:32:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032006512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032006515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032006517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032006528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032006540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,631 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ba2c5e908cdd42058683e6ca34df00dc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ba2c5e908cdd42058683e6ca34df00dc 2024-12-12T19:32:26,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/da14247a4cf04d01b461cf9e545fe2b6 2024-12-12T19:32:26,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T19:32:26,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
as already flushing 2024-12-12T19:32:26,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,679 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/d7aef9e0c24744908134d68bd484696f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/d7aef9e0c24744908134d68bd484696f 2024-12-12T19:32:26,681 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into ba2c5e908cdd42058683e6ca34df00dc(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:26,681 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:26,681 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=13, startTime=1734031946081; duration=0sec 2024-12-12T19:32:26,681 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:26,681 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:26,681 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:26,693 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:26,693 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:26,694 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,694 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7268bcda2e8b416f89f9477b83cf336c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1627466b70b94010b9a904725922995d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/fed750e3cf694848b40df127bacd7ae3] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=35.9 K 2024-12-12T19:32:26,698 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7268bcda2e8b416f89f9477b83cf336c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734031942112 2024-12-12T19:32:26,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/b53014c90f9344a8af1f371a940fddfd is 50, key is test_row_0/B:col10/1734031944929/Put/seqid=0 2024-12-12T19:32:26,707 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1627466b70b94010b9a904725922995d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734031942300 2024-12-12T19:32:26,713 DEBUG 
[RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting fed750e3cf694848b40df127bacd7ae3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1734031943544 2024-12-12T19:32:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741881_1057 (size=12151) 2024-12-12T19:32:26,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/b53014c90f9344a8af1f371a940fddfd 2024-12-12T19:32:26,744 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into d7aef9e0c24744908134d68bd484696f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:26,744 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:26,744 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=13, startTime=1734031946081; duration=0sec 2024-12-12T19:32:26,744 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:26,744 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:26,745 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:26,746 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/b3354e136b2f49a59584f3e1dfcc1aa0 is 50, key is test_row_0/C:col10/1734031944727/Put/seqid=0 2024-12-12T19:32:26,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T19:32:26,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7660103287c2422e8acbf811add3497b is 50, key is test_row_0/C:col10/1734031944929/Put/seqid=0 2024-12-12T19:32:26,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741882_1058 (size=12595) 2024-12-12T19:32:26,826 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T19:32:26,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:26,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032006824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:26,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:26,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:26,829 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/b3354e136b2f49a59584f3e1dfcc1aa0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/b3354e136b2f49a59584f3e1dfcc1aa0 2024-12-12T19:32:26,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032006823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032006829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
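The put requests above are being rejected by HRegion.checkResources() with RegionTooBusyException while the region's memstore is over its blocking limit. As context only (not part of the captured log), a minimal, hypothetical client-side sketch in Java of how a writer could back off and retry such a put; in practice the HBase client's own retry machinery usually absorbs this exception before the application sees it, so the explicit catch below is purely illustrative. The table, row, family, and qualifier names are taken from the log; everything else is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int attempts = 0;
      while (true) {
        try {
          table.put(put);                 // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts >= 5) {
            throw e;                      // give up after a few attempts
          }
          Thread.sleep(100L * attempts);  // simple linear backoff before retrying
        }
      }
    }
  }
}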
2024-12-12T19:32:26,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032006844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:26,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032006847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741883_1059 (size=12151) 2024-12-12T19:32:26,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7660103287c2422e8acbf811add3497b 2024-12-12T19:32:26,874 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into b3354e136b2f49a59584f3e1dfcc1aa0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
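For reference, the "Over memstore limit=512.0 K" figure that recurs above is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; once the memstore grows past it, HRegion.checkResources() rejects further mutations until the in-flight flush completes. A minimal sketch of that arithmetic, assuming a test-style 128 KB flush size and the default multiplier of 4 (the actual values used by this run are not shown in the excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical test-style settings: a 128 KB flush size with a multiplier
    // of 4 yields the 512 K blocking limit seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = flushSize * multiplier;

    // Writes to the region fail with RegionTooBusyException while the memstore
    // stays above this size and a flush is still in progress.
    System.out.println("Blocking memstore size: " + (blockingSize / 1024) + " K");
  }
}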
2024-12-12T19:32:26,874 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:26,874 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=13, startTime=1734031946081; duration=0sec 2024-12-12T19:32:26,874 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:26,874 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:26,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/da14247a4cf04d01b461cf9e545fe2b6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/da14247a4cf04d01b461cf9e545fe2b6 2024-12-12T19:32:26,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/da14247a4cf04d01b461cf9e545fe2b6, entries=150, sequenceid=209, filesize=11.9 K 2024-12-12T19:32:26,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/b53014c90f9344a8af1f371a940fddfd as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b53014c90f9344a8af1f371a940fddfd 2024-12-12T19:32:26,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b53014c90f9344a8af1f371a940fddfd, entries=150, sequenceid=209, filesize=11.9 K 2024-12-12T19:32:26,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/7660103287c2422e8acbf811add3497b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7660103287c2422e8acbf811add3497b 2024-12-12T19:32:26,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7660103287c2422e8acbf811add3497b, entries=150, sequenceid=209, filesize=11.9 K 2024-12-12T19:32:26,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 788ms, 
sequenceid=209, compaction requested=false 2024-12-12T19:32:26,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:26,996 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:26,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T19:32:26,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:26,999 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:32:26,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:26,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:26,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:27,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:27,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b38da28754cf4591a26838dd014b3e4b is 50, key is test_row_0/A:col10/1734031946178/Put/seqid=0 2024-12-12T19:32:27,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741884_1060 (size=12151) 2024-12-12T19:32:27,039 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b38da28754cf4591a26838dd014b3e4b 2024-12-12T19:32:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e53e67affbe14ccabcc1f6787c019dd4 is 50, key is test_row_0/B:col10/1734031946178/Put/seqid=0 2024-12-12T19:32:27,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741885_1061 (size=12151) 2024-12-12T19:32:27,094 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e53e67affbe14ccabcc1f6787c019dd4 2024-12-12T19:32:27,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/47c2929223dd45ad89645f72bf0b9e3e is 50, key is test_row_0/C:col10/1734031946178/Put/seqid=0 2024-12-12T19:32:27,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741886_1062 (size=12151) 2024-12-12T19:32:27,128 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/47c2929223dd45ad89645f72bf0b9e3e 2024-12-12T19:32:27,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/b38da28754cf4591a26838dd014b3e4b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b38da28754cf4591a26838dd014b3e4b 2024-12-12T19:32:27,156 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b38da28754cf4591a26838dd014b3e4b, entries=150, sequenceid=225, filesize=11.9 K 2024-12-12T19:32:27,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e53e67affbe14ccabcc1f6787c019dd4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e53e67affbe14ccabcc1f6787c019dd4 2024-12-12T19:32:27,174 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e53e67affbe14ccabcc1f6787c019dd4, entries=150, sequenceid=225, filesize=11.9 K 2024-12-12T19:32:27,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/47c2929223dd45ad89645f72bf0b9e3e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/47c2929223dd45ad89645f72bf0b9e3e 2024-12-12T19:32:27,200 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/47c2929223dd45ad89645f72bf0b9e3e, entries=150, sequenceid=225, filesize=11.9 K 2024-12-12T19:32:27,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-12T19:32:27,215 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 216ms, sequenceid=225, compaction requested=true 2024-12-12T19:32:27,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:27,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:27,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-12T19:32:27,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-12T19:32:27,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-12T19:32:27,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0420 sec 2024-12-12T19:32:27,243 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0640 sec 2024-12-12T19:32:27,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T19:32:27,285 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-12T19:32:27,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-12T19:32:27,291 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:27,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:27,293 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:27,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:27,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:27,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:27,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:32:27,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:27,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, 
store=B 2024-12-12T19:32:27,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:27,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/8ee69ed1e29944fbbf125cc7b4f24b95 is 50, key is test_row_0/A:col10/1734031947353/Put/seqid=0 2024-12-12T19:32:27,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:27,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:27,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
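The pid=20/21 and pid=22/23 entries above correspond to client-requested table flushes, executed on the master as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region. A minimal sketch of issuing the same request through the public Admin API; only the table name is taken from the log, the rest is an assumed, generic client setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the server this
      // shows up as a FlushTableProcedure with per-region FlushRegionProcedures,
      // the same procedures logged here as pid=20/21 and pid=22/23.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}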
2024-12-12T19:32:27,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741887_1063 (size=12151) 2024-12-12T19:32:27,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/8ee69ed1e29944fbbf125cc7b4f24b95 2024-12-12T19:32:27,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032007502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032007506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032007511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032007518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/d81d484da1384ca3a52ae40bd4e70744 is 50, key is test_row_0/B:col10/1734031947353/Put/seqid=0 2024-12-12T19:32:27,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032007527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741888_1064 (size=12151) 2024-12-12T19:32:27,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/d81d484da1384ca3a52ae40bd4e70744 2024-12-12T19:32:27,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:27,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/9bff5682d2be4461b0a6cd68c06bc330 is 50, key is test_row_0/C:col10/1734031947353/Put/seqid=0 2024-12-12T19:32:27,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:27,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:27,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:27,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032007624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032007628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032007635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032007652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032007652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741889_1065 (size=12151) 2024-12-12T19:32:27,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/9bff5682d2be4461b0a6cd68c06bc330 2024-12-12T19:32:27,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/8ee69ed1e29944fbbf125cc7b4f24b95 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8ee69ed1e29944fbbf125cc7b4f24b95 2024-12-12T19:32:27,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8ee69ed1e29944fbbf125cc7b4f24b95, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T19:32:27,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/d81d484da1384ca3a52ae40bd4e70744 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/d81d484da1384ca3a52ae40bd4e70744 2024-12-12T19:32:27,754 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/d81d484da1384ca3a52ae40bd4e70744, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T19:32:27,765 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/9bff5682d2be4461b0a6cd68c06bc330 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/9bff5682d2be4461b0a6cd68c06bc330 2024-12-12T19:32:27,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/9bff5682d2be4461b0a6cd68c06bc330, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T19:32:27,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 371ms, sequenceid=238, compaction requested=true 2024-12-12T19:32:27,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:27,783 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:27,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:27,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:27,783 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:27,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:27,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:27,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:27,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:27,793 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:27,793 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:27,793 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:27,793 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is initiating minor compaction (all files) 
2024-12-12T19:32:27,793 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,793 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,793 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/d7aef9e0c24744908134d68bd484696f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/da14247a4cf04d01b461cf9e545fe2b6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b38da28754cf4591a26838dd014b3e4b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8ee69ed1e29944fbbf125cc7b4f24b95] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=47.9 K 2024-12-12T19:32:27,793 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ba2c5e908cdd42058683e6ca34df00dc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b53014c90f9344a8af1f371a940fddfd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e53e67affbe14ccabcc1f6787c019dd4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/d81d484da1384ca3a52ae40bd4e70744] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=47.9 K 2024-12-12T19:32:27,794 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7aef9e0c24744908134d68bd484696f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1734031943544 2024-12-12T19:32:27,794 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ba2c5e908cdd42058683e6ca34df00dc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1734031943544 2024-12-12T19:32:27,794 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting da14247a4cf04d01b461cf9e545fe2b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734031944929 2024-12-12T19:32:27,795 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b53014c90f9344a8af1f371a940fddfd, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734031944929 2024-12-12T19:32:27,795 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b38da28754cf4591a26838dd014b3e4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1734031946178 2024-12-12T19:32:27,795 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e53e67affbe14ccabcc1f6787c019dd4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1734031946178 2024-12-12T19:32:27,796 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ee69ed1e29944fbbf125cc7b4f24b95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734031947353 2024-12-12T19:32:27,813 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d81d484da1384ca3a52ae40bd4e70744, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734031947353 2024-12-12T19:32:27,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:32:27,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:27,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:27,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:27,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:27,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:27,848 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:27,848 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:27,849 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/bbac304451044fff8d0b2a1f4c5035f9 is 50, key is test_row_0/B:col10/1734031947353/Put/seqid=0 2024-12-12T19:32:27,852 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/9b49ac1af8e140b28075902a23dd37c7 is 50, key is test_row_0/A:col10/1734031947353/Put/seqid=0 2024-12-12T19:32:27,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/8062aed84a5f4803a23807359fd3d357 is 50, key is test_row_0/A:col10/1734031947502/Put/seqid=0 2024-12-12T19:32:27,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032007866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032007864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032007877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032007880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032007881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:27,933 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:27,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:27,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:27,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:27,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:27,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741891_1067 (size=12731) 2024-12-12T19:32:27,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741890_1066 (size=12731) 2024-12-12T19:32:27,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741892_1068 (size=14741) 2024-12-12T19:32:27,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032007983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032007983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032007988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032007992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:27,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032007993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,098 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:28,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:28,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:28,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032008204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032008205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032008205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032008211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032008210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,275 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:28,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:28,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:28,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,355 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/9b49ac1af8e140b28075902a23dd37c7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9b49ac1af8e140b28075902a23dd37c7 2024-12-12T19:32:28,362 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/bbac304451044fff8d0b2a1f4c5035f9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/bbac304451044fff8d0b2a1f4c5035f9 2024-12-12T19:32:28,378 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into bbac304451044fff8d0b2a1f4c5035f9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:28,379 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:28,379 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=12, startTime=1734031947783; duration=0sec 2024-12-12T19:32:28,379 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:28,379 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:28,379 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:28,382 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:28,382 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:28,382 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:28,382 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/b3354e136b2f49a59584f3e1dfcc1aa0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7660103287c2422e8acbf811add3497b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/47c2929223dd45ad89645f72bf0b9e3e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/9bff5682d2be4461b0a6cd68c06bc330] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=47.9 K 2024-12-12T19:32:28,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/8062aed84a5f4803a23807359fd3d357 2024-12-12T19:32:28,388 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b3354e136b2f49a59584f3e1dfcc1aa0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1734031943544 2024-12-12T19:32:28,388 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into 9b49ac1af8e140b28075902a23dd37c7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:28,388 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:28,388 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=12, startTime=1734031947783; duration=0sec 2024-12-12T19:32:28,388 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:28,388 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:28,389 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7660103287c2422e8acbf811add3497b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734031944929 2024-12-12T19:32:28,389 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 47c2929223dd45ad89645f72bf0b9e3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1734031946178 2024-12-12T19:32:28,390 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bff5682d2be4461b0a6cd68c06bc330, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734031947353 2024-12-12T19:32:28,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:28,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/fe5a74bd31594c919c0ea3d5c7a009ce is 50, key is test_row_0/B:col10/1734031947502/Put/seqid=0 2024-12-12T19:32:28,438 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#55 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:28,438 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/33c5af4694e44a719f9294561c638442 is 50, key is test_row_0/C:col10/1734031947353/Put/seqid=0 2024-12-12T19:32:28,446 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:28,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:28,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:28,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:28,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741893_1069 (size=12301) 2024-12-12T19:32:28,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741894_1070 (size=12731) 2024-12-12T19:32:28,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032008510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032008510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032008518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032008521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:28,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032008557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:28,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:28,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:28,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:28,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:28,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
as already flushing 2024-12-12T19:32:28,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/fe5a74bd31594c919c0ea3d5c7a009ce 2024-12-12T19:32:28,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/5dfde6d47c124822b33efaa9cf3705b0 is 50, key is test_row_0/C:col10/1734031947502/Put/seqid=0 2024-12-12T19:32:28,914 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:28,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:28,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
as already flushing 2024-12-12T19:32:28,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:28,916 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:28,943 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/33c5af4694e44a719f9294561c638442 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/33c5af4694e44a719f9294561c638442 2024-12-12T19:32:28,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741895_1071 (size=12301) 2024-12-12T19:32:28,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/5dfde6d47c124822b33efaa9cf3705b0 2024-12-12T19:32:28,974 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into 33c5af4694e44a719f9294561c638442(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:28,974 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:28,974 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=12, startTime=1734031947784; duration=0sec 2024-12-12T19:32:28,974 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:28,974 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:28,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/8062aed84a5f4803a23807359fd3d357 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8062aed84a5f4803a23807359fd3d357 2024-12-12T19:32:29,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8062aed84a5f4803a23807359fd3d357, entries=200, sequenceid=264, filesize=14.4 K 2024-12-12T19:32:29,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/fe5a74bd31594c919c0ea3d5c7a009ce as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/fe5a74bd31594c919c0ea3d5c7a009ce 2024-12-12T19:32:29,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/fe5a74bd31594c919c0ea3d5c7a009ce, entries=150, sequenceid=264, filesize=12.0 K 2024-12-12T19:32:29,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/5dfde6d47c124822b33efaa9cf3705b0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/5dfde6d47c124822b33efaa9cf3705b0 2024-12-12T19:32:29,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/5dfde6d47c124822b33efaa9cf3705b0, entries=150, sequenceid=264, filesize=12.0 K 2024-12-12T19:32:29,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1188ms, sequenceid=264, 
compaction requested=false 2024-12-12T19:32:29,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:29,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:29,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:32:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:29,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/9bc35dfc884d49f4abaca8b58228007c is 50, key is test_row_0/A:col10/1734031949041/Put/seqid=0 2024-12-12T19:32:29,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:29,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:29,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032009136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032009133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032009137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032009147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032009151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741896_1072 (size=12301) 2024-12-12T19:32:29,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/9bc35dfc884d49f4abaca8b58228007c 2024-12-12T19:32:29,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e82afb77f36042bdb0bf9a85b88b7ea1 is 50, key is test_row_0/B:col10/1734031949041/Put/seqid=0 2024-12-12T19:32:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741897_1073 (size=12301) 2024-12-12T19:32:29,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e82afb77f36042bdb0bf9a85b88b7ea1 2024-12-12T19:32:29,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032009254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032009260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032009257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032009266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,280 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/45532d66faef4bb58076a135e7ee4f30 is 50, key is test_row_0/C:col10/1734031949041/Put/seqid=0 2024-12-12T19:32:29,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:29,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:29,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032009279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741898_1074 (size=12301) 2024-12-12T19:32:29,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:29,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:29,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:29,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:29,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032009458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032009469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032009477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032009478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032009490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,588 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:29,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:29,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/45532d66faef4bb58076a135e7ee4f30 2024-12-12T19:32:29,748 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:29,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:29,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:29,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:29,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/9bc35dfc884d49f4abaca8b58228007c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9bc35dfc884d49f4abaca8b58228007c 2024-12-12T19:32:29,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032009760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9bc35dfc884d49f4abaca8b58228007c, entries=150, sequenceid=280, filesize=12.0 K 2024-12-12T19:32:29,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032009775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e82afb77f36042bdb0bf9a85b88b7ea1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e82afb77f36042bdb0bf9a85b88b7ea1 2024-12-12T19:32:29,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032009784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e82afb77f36042bdb0bf9a85b88b7ea1, entries=150, sequenceid=280, filesize=12.0 K 2024-12-12T19:32:29,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032009783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/45532d66faef4bb58076a135e7ee4f30 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/45532d66faef4bb58076a135e7ee4f30 2024-12-12T19:32:29,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:29,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032009805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/45532d66faef4bb58076a135e7ee4f30, entries=150, sequenceid=280, filesize=12.0 K 2024-12-12T19:32:29,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 774ms, sequenceid=280, compaction requested=true 2024-12-12T19:32:29,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:29,826 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:29,830 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:29,830 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:29,830 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:29,831 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9b49ac1af8e140b28075902a23dd37c7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8062aed84a5f4803a23807359fd3d357, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9bc35dfc884d49f4abaca8b58228007c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=38.8 K 2024-12-12T19:32:29,831 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b49ac1af8e140b28075902a23dd37c7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734031947353 2024-12-12T19:32:29,834 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8062aed84a5f4803a23807359fd3d357, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1734031947502 2024-12-12T19:32:29,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:29,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:29,834 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:29,834 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bc35dfc884d49f4abaca8b58228007c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734031947876 2024-12-12T19:32:29,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:29,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:29,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:29,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:29,840 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:29,840 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is 
initiating minor compaction (all files) 2024-12-12T19:32:29,840 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,840 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/bbac304451044fff8d0b2a1f4c5035f9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/fe5a74bd31594c919c0ea3d5c7a009ce, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e82afb77f36042bdb0bf9a85b88b7ea1] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=36.5 K 2024-12-12T19:32:29,841 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bbac304451044fff8d0b2a1f4c5035f9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734031947353 2024-12-12T19:32:29,842 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting fe5a74bd31594c919c0ea3d5c7a009ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1734031947502 2024-12-12T19:32:29,842 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e82afb77f36042bdb0bf9a85b88b7ea1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734031947876 2024-12-12T19:32:29,866 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:29,867 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/932d287f7ff44e2fbef5176a1aceaf2d is 50, key is test_row_0/A:col10/1734031949041/Put/seqid=0 2024-12-12T19:32:29,871 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:29,871 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/30e3cd75feaf4b518f8d506b3f6aad8f is 50, key is test_row_0/B:col10/1734031949041/Put/seqid=0 2024-12-12T19:32:29,902 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:29,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T19:32:29,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:29,904 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T19:32:29,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:29,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:29,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:29,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:29,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:29,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:29,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/78a1e7cc6d0f4be69b3905b27db6f55a is 50, key is test_row_0/A:col10/1734031949128/Put/seqid=0 2024-12-12T19:32:29,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741900_1076 (size=12983) 2024-12-12T19:32:29,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741899_1075 (size=12983) 2024-12-12T19:32:29,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741901_1077 
(size=12301) 2024-12-12T19:32:29,978 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/78a1e7cc6d0f4be69b3905b27db6f55a 2024-12-12T19:32:30,010 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/932d287f7ff44e2fbef5176a1aceaf2d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/932d287f7ff44e2fbef5176a1aceaf2d 2024-12-12T19:32:30,016 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/30e3cd75feaf4b518f8d506b3f6aad8f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/30e3cd75feaf4b518f8d506b3f6aad8f 2024-12-12T19:32:30,027 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into 932d287f7ff44e2fbef5176a1aceaf2d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:30,027 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:30,027 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=13, startTime=1734031949826; duration=0sec 2024-12-12T19:32:30,027 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:30,027 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:30,027 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:30,038 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:30,038 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:30,038 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:30,038 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/33c5af4694e44a719f9294561c638442, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/5dfde6d47c124822b33efaa9cf3705b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/45532d66faef4bb58076a135e7ee4f30] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=36.5 K 2024-12-12T19:32:30,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/997abcaf9ce54aa897cfa271c29f94ef is 50, key is test_row_0/B:col10/1734031949128/Put/seqid=0 2024-12-12T19:32:30,039 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33c5af4694e44a719f9294561c638442, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734031947353 2024-12-12T19:32:30,040 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dfde6d47c124822b33efaa9cf3705b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=264, earliestPutTs=1734031947502 2024-12-12T19:32:30,048 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45532d66faef4bb58076a135e7ee4f30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734031947876 2024-12-12T19:32:30,064 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into 30e3cd75feaf4b518f8d506b3f6aad8f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:30,064 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:30,064 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=13, startTime=1734031949834; duration=0sec 2024-12-12T19:32:30,065 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:30,065 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:30,100 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#64 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:30,101 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/4b156968ff104bb5b80ed74ad834c19b is 50, key is test_row_0/C:col10/1734031949041/Put/seqid=0 2024-12-12T19:32:30,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741902_1078 (size=12301) 2024-12-12T19:32:30,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741903_1079 (size=12983) 2024-12-12T19:32:30,207 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/4b156968ff104bb5b80ed74ad834c19b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/4b156968ff104bb5b80ed74ad834c19b 2024-12-12T19:32:30,234 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into 4b156968ff104bb5b80ed74ad834c19b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:30,234 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:30,234 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=13, startTime=1734031949835; duration=0sec 2024-12-12T19:32:30,234 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:30,234 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:30,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:30,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:30,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032010319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032010326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032010326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032010332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032010325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032010436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032010438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032010440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032010452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,534 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/997abcaf9ce54aa897cfa271c29f94ef 2024-12-12T19:32:30,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/c746759c55344176ab5d855f2f5cfd18 is 50, key is test_row_0/C:col10/1734031949128/Put/seqid=0 2024-12-12T19:32:30,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741904_1080 (size=12301) 2024-12-12T19:32:30,595 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/c746759c55344176ab5d855f2f5cfd18 2024-12-12T19:32:30,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/78a1e7cc6d0f4be69b3905b27db6f55a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/78a1e7cc6d0f4be69b3905b27db6f55a 2024-12-12T19:32:30,622 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/78a1e7cc6d0f4be69b3905b27db6f55a, entries=150, sequenceid=303, filesize=12.0 K 2024-12-12T19:32:30,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/997abcaf9ce54aa897cfa271c29f94ef as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/997abcaf9ce54aa897cfa271c29f94ef 2024-12-12T19:32:30,644 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/997abcaf9ce54aa897cfa271c29f94ef, entries=150, sequenceid=303, filesize=12.0 K 2024-12-12T19:32:30,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032010642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/c746759c55344176ab5d855f2f5cfd18 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/c746759c55344176ab5d855f2f5cfd18 2024-12-12T19:32:30,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032010647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032010647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032010657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:30,663 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/c746759c55344176ab5d855f2f5cfd18, entries=150, sequenceid=303, filesize=12.0 K 2024-12-12T19:32:30,664 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 759ms, sequenceid=303, compaction requested=false 2024-12-12T19:32:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-12T19:32:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-12T19:32:30,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-12T19:32:30,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3740 sec 2024-12-12T19:32:30,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 3.3850 sec 2024-12-12T19:32:30,871 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T19:32:30,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:30,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:32:30,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:30,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:30,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:30,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:30,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:30,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:30,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/0a55c961d43f47e89c3b48627d2cd915 is 50, key is test_row_0/A:col10/1734031950324/Put/seqid=0 2024-12-12T19:32:31,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032011024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032011025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032011027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032011027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741905_1081 (size=14741) 2024-12-12T19:32:31,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/0a55c961d43f47e89c3b48627d2cd915 2024-12-12T19:32:31,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/5b758d7e36d4483fb72b4ff010dd20aa is 50, key is test_row_0/B:col10/1734031950324/Put/seqid=0 2024-12-12T19:32:31,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741906_1082 (size=12301) 2024-12-12T19:32:31,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032011129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032011129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032011131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032011131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032011331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032011337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032011340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032011364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032011364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T19:32:31,401 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-12T19:32:31,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-12T19:32:31,424 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:31,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:31,426 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:31,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:31,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/5b758d7e36d4483fb72b4ff010dd20aa 2024-12-12T19:32:31,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:31,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/0c1df61a95d14ba7ba488dc1875f3867 is 50, key is test_row_0/C:col10/1734031950324/Put/seqid=0 2024-12-12T19:32:31,579 
DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:31,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:31,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:31,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:31,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:31,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:31,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741907_1083 (size=12301) 2024-12-12T19:32:31,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032011653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032011653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032011675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:31,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032011688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:31,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:31,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:31,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:31,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:31,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:31,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:31,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:31,897 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:31,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:31,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:31,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:31,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:31,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:32,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/0c1df61a95d14ba7ba488dc1875f3867 2024-12-12T19:32:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:32,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:32,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:32,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:32,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/0a55c961d43f47e89c3b48627d2cd915 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/0a55c961d43f47e89c3b48627d2cd915 2024-12-12T19:32:32,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/0a55c961d43f47e89c3b48627d2cd915, entries=200, sequenceid=320, filesize=14.4 K 2024-12-12T19:32:32,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/5b758d7e36d4483fb72b4ff010dd20aa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/5b758d7e36d4483fb72b4ff010dd20aa 2024-12-12T19:32:32,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/5b758d7e36d4483fb72b4ff010dd20aa, entries=150, sequenceid=320, filesize=12.0 K 2024-12-12T19:32:32,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/0c1df61a95d14ba7ba488dc1875f3867 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/0c1df61a95d14ba7ba488dc1875f3867 2024-12-12T19:32:32,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/0c1df61a95d14ba7ba488dc1875f3867, entries=150, sequenceid=320, filesize=12.0 K 2024-12-12T19:32:32,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1158ms, sequenceid=320, compaction requested=true 2024-12-12T19:32:32,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:32,122 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:32,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:32,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:32,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact 
mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:32,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:32,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ffb87fb734b5d4ed7499f1da86f79f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:32,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:32:32,124 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:32,127 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:32,127 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/A is initiating minor compaction (all files) 2024-12-12T19:32:32,127 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/A in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,128 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/932d287f7ff44e2fbef5176a1aceaf2d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/78a1e7cc6d0f4be69b3905b27db6f55a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/0a55c961d43f47e89c3b48627d2cd915] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=39.1 K 2024-12-12T19:32:32,128 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:32,128 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/B is initiating minor compaction (all files) 2024-12-12T19:32:32,128 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 932d287f7ff44e2fbef5176a1aceaf2d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734031947876 2024-12-12T19:32:32,128 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/B in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:32,129 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/30e3cd75feaf4b518f8d506b3f6aad8f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/997abcaf9ce54aa897cfa271c29f94ef, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/5b758d7e36d4483fb72b4ff010dd20aa] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=36.7 K 2024-12-12T19:32:32,129 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78a1e7cc6d0f4be69b3905b27db6f55a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734031949128 2024-12-12T19:32:32,130 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 30e3cd75feaf4b518f8d506b3f6aad8f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734031947876 2024-12-12T19:32:32,130 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a55c961d43f47e89c3b48627d2cd915, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1734031950320 2024-12-12T19:32:32,131 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 997abcaf9ce54aa897cfa271c29f94ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734031949128 2024-12-12T19:32:32,132 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b758d7e36d4483fb72b4ff010dd20aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1734031950324 2024-12-12T19:32:32,156 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#A#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:32,157 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/6952b01b89c548a0864ec50d9dd1f82b is 50, key is test_row_0/A:col10/1734031950324/Put/seqid=0 2024-12-12T19:32:32,157 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#B#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:32,157 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/9abba5aa162f4edfaa2b9a3b5a51b04c is 50, key is test_row_0/B:col10/1734031950324/Put/seqid=0 2024-12-12T19:32:32,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:32,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:32:32,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:32,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:32,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:32,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:32,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:32,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:32,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/79b03f4ca9f44e5397bb131ec917cb8a is 50, key is test_row_0/A:col10/1734031952168/Put/seqid=0 2024-12-12T19:32:32,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741909_1085 (size=13085) 2024-12-12T19:32:32,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741908_1084 (size=13085) 2024-12-12T19:32:32,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032012211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032012216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032012218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,223 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032012223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:32,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741910_1086 (size=12301) 2024-12-12T19:32:32,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032012328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032012328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032012328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032012329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,380 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:32,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032012537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:32,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:32,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
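The WARN and DEBUG records above show the region server rejecting Mutate RPCs with RegionTooBusyException once the memstore of region 6ffb87fb734b5d4ed7499f1da86f79f5 crosses its blocking limit (512.0 K in this run). That limit is the per-region flush size multiplied by the memstore block multiplier, and the exception is retriable, so a client normally just backs off while the in-flight flush drains the memstore. The sketch below is illustrative only, not taken from the test itself: the class name and concrete values are invented, and only standard HBase 2.x client APIs and configuration keys are used.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative client sketch (invented class name); values are examples only.
    public class MemstorePressureClientSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // RegionTooBusyException is retriable; give the client enough retries and pause
        // to ride out a memstore flush instead of surfacing the failure to the caller.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100); // ms between retries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // retried internally while the region reports it is too busy
        }
      }
    }

Server side, the corresponding knobs are hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the deliberately small 512 K limit seen in this log is assumed to come from the test's own configuration.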
2024-12-12T19:32:32,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032012540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032012540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032012540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,627 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/6952b01b89c548a0864ec50d9dd1f82b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/6952b01b89c548a0864ec50d9dd1f82b 2024-12-12T19:32:32,640 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/9abba5aa162f4edfaa2b9a3b5a51b04c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/9abba5aa162f4edfaa2b9a3b5a51b04c 2024-12-12T19:32:32,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/79b03f4ca9f44e5397bb131ec917cb8a 2024-12-12T19:32:32,668 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/B of 6ffb87fb734b5d4ed7499f1da86f79f5 into 9abba5aa162f4edfaa2b9a3b5a51b04c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:32,680 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/A of 6ffb87fb734b5d4ed7499f1da86f79f5 into 6952b01b89c548a0864ec50d9dd1f82b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:32,680 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:32,680 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/A, priority=13, startTime=1734031952122; duration=0sec 2024-12-12T19:32:32,680 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:32,680 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:A 2024-12-12T19:32:32,681 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:32,682 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:32,682 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/B, priority=13, startTime=1734031952122; duration=0sec 2024-12-12T19:32:32,682 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:32,682 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:B 2024-12-12T19:32:32,683 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:32,684 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 6ffb87fb734b5d4ed7499f1da86f79f5/C is initiating minor compaction (all files) 2024-12-12T19:32:32,684 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ffb87fb734b5d4ed7499f1da86f79f5/C in TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:32,684 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/4b156968ff104bb5b80ed74ad834c19b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/c746759c55344176ab5d855f2f5cfd18, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/0c1df61a95d14ba7ba488dc1875f3867] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp, totalSize=36.7 K 2024-12-12T19:32:32,685 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b156968ff104bb5b80ed74ad834c19b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734031947876 2024-12-12T19:32:32,685 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c746759c55344176ab5d855f2f5cfd18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734031949128 2024-12-12T19:32:32,686 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c1df61a95d14ba7ba488dc1875f3867, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1734031950324 2024-12-12T19:32:32,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/c3f9eb888fe74eaca4021f33c746d26e is 50, key is test_row_0/B:col10/1734031952168/Put/seqid=0 2024-12-12T19:32:32,693 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:32,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:32,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
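The compaction records above show the ExploringCompactionPolicy selecting all three eligible HFiles per store and the short/long compaction threads rewriting stores A, B and C of the region into single ~12.8 K files. For reference, the same operation can be requested explicitly through the public Admin API; the sketch below is illustrative (invented class name, not what the test does) and uses only Admin methods that exist in HBase 2.x.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative admin sketch (invented class name).
    public class CompactionRequestSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Request a compaction of a single column family, mirroring the per-store
          // compactions of A, B and C reported in the log.
          admin.compact(table, Bytes.toBytes("C"));
          // A major compaction would rewrite every file in every store:
          // admin.majorCompact(table);
        }
      }
    }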
2024-12-12T19:32:32,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,723 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ffb87fb734b5d4ed7499f1da86f79f5#C#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:32,724 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/013fc8b289d34ace99d53a6205d096f3 is 50, key is test_row_0/C:col10/1734031950324/Put/seqid=0 2024-12-12T19:32:32,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741911_1087 (size=12301) 2024-12-12T19:32:32,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741912_1088 (size=13085) 2024-12-12T19:32:32,830 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/013fc8b289d34ace99d53a6205d096f3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/013fc8b289d34ace99d53a6205d096f3 2024-12-12T19:32:32,849 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:32,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032012851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032012851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032012852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:32,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:32,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:32,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:32,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:32,864 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ffb87fb734b5d4ed7499f1da86f79f5/C of 6ffb87fb734b5d4ed7499f1da86f79f5 into 013fc8b289d34ace99d53a6205d096f3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:32,864 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:32,864 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., storeName=6ffb87fb734b5d4ed7499f1da86f79f5/C, priority=13, startTime=1734031952123; duration=0sec 2024-12-12T19:32:32,864 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:32,864 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ffb87fb734b5d4ed7499f1da86f79f5:C 2024-12-12T19:32:32,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:32,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032012863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:33,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:33,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:33,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:33,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:33,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:33,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/c3f9eb888fe74eaca4021f33c746d26e 2024-12-12T19:32:33,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1d43dab1a0da46a887f3785901fbe0f1 is 50, key is test_row_0/C:col10/1734031952168/Put/seqid=0 2024-12-12T19:32:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741913_1089 (size=12301) 2024-12-12T19:32:33,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:33,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:33,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:33,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48506 deadline: 1734032013362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48530 deadline: 1734032013357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:33,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48532 deadline: 1734032013364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:33,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48482 deadline: 1734032013367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,371 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4235 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:33,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48466 deadline: 1734032013371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,489 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:33,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:33,570 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:52216 2024-12-12T19:32:33,570 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:33,571 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:52216 2024-12-12T19:32:33,571 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:33,572 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:52216 2024-12-12T19:32:33,572 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:33,578 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:52216 2024-12-12T19:32:33,578 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:33,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:33,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
as already flushing 2024-12-12T19:32:33,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:33,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:33,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1d43dab1a0da46a887f3785901fbe0f1 2024-12-12T19:32:33,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/79b03f4ca9f44e5397bb131ec917cb8a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/79b03f4ca9f44e5397bb131ec917cb8a 2024-12-12T19:32:33,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/79b03f4ca9f44e5397bb131ec917cb8a, entries=150, sequenceid=345, filesize=12.0 K 2024-12-12T19:32:33,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/c3f9eb888fe74eaca4021f33c746d26e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c3f9eb888fe74eaca4021f33c746d26e 2024-12-12T19:32:33,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c3f9eb888fe74eaca4021f33c746d26e, entries=150, sequenceid=345, filesize=12.0 K 2024-12-12T19:32:33,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/1d43dab1a0da46a887f3785901fbe0f1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1d43dab1a0da46a887f3785901fbe0f1 2024-12-12T19:32:33,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1d43dab1a0da46a887f3785901fbe0f1, entries=150, sequenceid=345, filesize=12.0 K 2024-12-12T19:32:33,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1627ms, sequenceid=345, compaction requested=false 2024-12-12T19:32:33,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:33,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:33,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T19:32:33,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:33,813 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:33,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:33,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:33,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:33,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:33,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:33,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:33,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/3ca609f0be734276b23410450f86e935 is 50, key is test_row_0/A:col10/1734031952213/Put/seqid=0 2024-12-12T19:32:33,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741914_1090 (size=12301) 2024-12-12T19:32:34,251 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/3ca609f0be734276b23410450f86e935 2024-12-12T19:32:34,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ce876b2cf9ad4551be4abe51ef805c9b is 50, key is test_row_0/B:col10/1734031952213/Put/seqid=0 2024-12-12T19:32:34,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741915_1091 (size=12301) 2024-12-12T19:32:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:34,384 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:52216 2024-12-12T19:32:34,384 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-12T19:32:34,386 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:52216 2024-12-12T19:32:34,386 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:34,386 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:52216 2024-12-12T19:32:34,386 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:34,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. as already flushing 2024-12-12T19:32:34,396 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:52216 2024-12-12T19:32:34,396 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:34,688 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ce876b2cf9ad4551be4abe51ef805c9b 2024-12-12T19:32:34,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/a431f7ca2aab4a74983b67e57d965713 is 50, key is test_row_0/C:col10/1734031952213/Put/seqid=0 2024-12-12T19:32:34,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741916_1092 (size=12301) 2024-12-12T19:32:34,733 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/a431f7ca2aab4a74983b67e57d965713 2024-12-12T19:32:34,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/3ca609f0be734276b23410450f86e935 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/3ca609f0be734276b23410450f86e935 2024-12-12T19:32:34,766 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/3ca609f0be734276b23410450f86e935, entries=150, sequenceid=360, filesize=12.0 K 2024-12-12T19:32:34,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/ce876b2cf9ad4551be4abe51ef805c9b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce876b2cf9ad4551be4abe51ef805c9b 2024-12-12T19:32:34,782 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce876b2cf9ad4551be4abe51ef805c9b, entries=150, sequenceid=360, filesize=12.0 K 2024-12-12T19:32:34,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/a431f7ca2aab4a74983b67e57d965713 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/a431f7ca2aab4a74983b67e57d965713 2024-12-12T19:32:34,801 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/a431f7ca2aab4a74983b67e57d965713, entries=150, sequenceid=360, filesize=12.0 K 2024-12-12T19:32:34,805 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=26.84 KB/27480 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 993ms, sequenceid=360, compaction requested=true 2024-12-12T19:32:34,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:34,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
2024-12-12T19:32:34,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-12T19:32:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-12T19:32:34,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-12T19:32:34,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3820 sec 2024-12-12T19:32:34,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 3.4080 sec 2024-12-12T19:32:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T19:32:35,543 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-12T19:32:36,556 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T19:32:36,559 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T19:32:37,382 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:52216 2024-12-12T19:32:37,382 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T19:32:37,382 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2237 2024-12-12T19:32:37,383 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2110 2024-12-12T19:32:37,383 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T19:32:37,383 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1040 2024-12-12T19:32:37,383 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3120 rows 2024-12-12T19:32:37,383 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1029 2024-12-12T19:32:37,383 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3087 rows 2024-12-12T19:32:37,383 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:32:37,383 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:52216 2024-12-12T19:32:37,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:32:37,391 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T19:32:37,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T19:32:37,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:37,410 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031957410"}]},"ts":"1734031957410"} 2024-12-12T19:32:37,412 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T19:32:37,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T19:32:37,423 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T19:32:37,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:32:37,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, UNASSIGN}] 2024-12-12T19:32:37,430 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, UNASSIGN 2024-12-12T19:32:37,430 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=6ffb87fb734b5d4ed7499f1da86f79f5, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:37,431 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:32:37,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; CloseRegionProcedure 6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T19:32:37,587 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:37,590 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(124): Close 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:37,590 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:32:37,591 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1681): Closing 6ffb87fb734b5d4ed7499f1da86f79f5, disabling compactions & flushes 2024-12-12T19:32:37,591 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:37,591 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:37,591 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. after waiting 0 ms 2024-12-12T19:32:37,591 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 
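The entries above trace the disable path: the client's HBaseAdmin call becomes DisableTableProcedure pid=26 on the master, which fans out into CloseTableRegionsProcedure pid=27, an UNASSIGN TransitRegionStateProcedure pid=28, and CloseRegionProcedure pid=29 on the region server, while the client keeps polling whether pid=26 is done. Below is a minimal client-side sketch of the same flush-then-disable sequence through the public Admin API; it assumes an HBase 2.x client with an hbase-site.xml for this cluster on the classpath, and it is an illustration, not the AcidGuaranteesTestTool's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush memstores to HFiles first (the FlushTableProcedure seen as pid=24 above).
      admin.flush(table);
      // Disable the table; this call blocks until the master's DisableTableProcedure
      // (pid=26 above) and its close/unassign subprocedures finish.
      admin.disableTable(table);
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}
```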
2024-12-12T19:32:37,591 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(2837): Flushing 6ffb87fb734b5d4ed7499f1da86f79f5 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T19:32:37,592 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=A 2024-12-12T19:32:37,592 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:37,592 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=B 2024-12-12T19:32:37,592 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:37,592 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6ffb87fb734b5d4ed7499f1da86f79f5, store=C 2024-12-12T19:32:37,592 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:37,609 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/a38f34575a0a4f9abb1ccb22a3694303 is 50, key is test_row_0/A:col10/1734031957379/Put/seqid=0 2024-12-12T19:32:37,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741917_1093 (size=12301) 2024-12-12T19:32:37,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T19:32:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T19:32:38,038 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/a38f34575a0a4f9abb1ccb22a3694303 2024-12-12T19:32:38,057 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e13bfa69f9d14c519af45cbcad7f9267 is 50, key is test_row_0/B:col10/1734031957379/Put/seqid=0 2024-12-12T19:32:38,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741918_1094 (size=12301) 2024-12-12T19:32:38,088 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 
{event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e13bfa69f9d14c519af45cbcad7f9267 2024-12-12T19:32:38,115 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3 is 50, key is test_row_0/C:col10/1734031957379/Put/seqid=0 2024-12-12T19:32:38,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741919_1095 (size=12301) 2024-12-12T19:32:38,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T19:32:38,537 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3 2024-12-12T19:32:38,549 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/A/a38f34575a0a4f9abb1ccb22a3694303 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/a38f34575a0a4f9abb1ccb22a3694303 2024-12-12T19:32:38,573 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/a38f34575a0a4f9abb1ccb22a3694303, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T19:32:38,574 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/B/e13bfa69f9d14c519af45cbcad7f9267 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e13bfa69f9d14c519af45cbcad7f9267 2024-12-12T19:32:38,585 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e13bfa69f9d14c519af45cbcad7f9267, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T19:32:38,586 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/.tmp/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3 2024-12-12T19:32:38,601 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T19:32:38,614 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 6ffb87fb734b5d4ed7499f1da86f79f5 in 1023ms, sequenceid=368, compaction requested=true 2024-12-12T19:32:38,616 DEBUG [StoreCloser-TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/02619936cc5f4a2cb5a8f7fc8c7a4d16, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/e3992581ed774eaca0c901c4ea4321f4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/7adbeb30faf443cc8edca842b91ae976, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b504c6cef4234ffaab46b80ed917bd1b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/adfbbf51ea30449ab58677ceb156ebf9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/4a6435fc814841c8baf266be2f803a22, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b5b694b359b44ef6b556eec99f93b78b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/614cdba6a5ce47388160d0b7bba4b2f1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/cbda0861236945dd8a598a98714afc3f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/de9f7997de974c15924bc84b5fc390e3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/43a3cb6f073c4b528629a4829f81c910, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/54b81f64b55941b386b514d7eea196fa, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/d7aef9e0c24744908134d68bd484696f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/da14247a4cf04d01b461cf9e545fe2b6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b38da28754cf4591a26838dd014b3e4b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9b49ac1af8e140b28075902a23dd37c7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8ee69ed1e29944fbbf125cc7b4f24b95, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8062aed84a5f4803a23807359fd3d357, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/932d287f7ff44e2fbef5176a1aceaf2d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9bc35dfc884d49f4abaca8b58228007c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/78a1e7cc6d0f4be69b3905b27db6f55a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/0a55c961d43f47e89c3b48627d2cd915] to archive 2024-12-12T19:32:38,632 DEBUG [StoreCloser-TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
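The HStore close above hands the compacted A-family store files to HFileArchiver, and the following entries show each file moving from the region's data directory to the matching path under archive/. The helper below is only a sketch of that path convention as it appears in these lines (rootDir/data/... becomes rootDir/archive/data/...); toArchivePath is a hypothetical name used here for illustration, not HBase's actual HFileArchiver logic.

```java
import org.apache.hadoop.fs.Path;

public class ArchivePathDemo {
  // Mirror of the naming convention visible in the log: a compacted store file under
  // <rootDir>/data/... is moved to <rootDir>/archive/data/... (illustrative only).
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toString();
    String file = storeFile.toString();
    if (!file.startsWith(root + "/data/")) {
      throw new IllegalArgumentException("not under " + root + "/data/: " + file);
    }
    String relative = file.substring((root + "/data/").length());
    return new Path(rootDir, "archive/data/" + relative);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b");
    // Prints the archive location seen in the corresponding log entry above.
    System.out.println(toArchivePath(root, storeFile));
  }
}
```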
2024-12-12T19:32:38,672 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/02619936cc5f4a2cb5a8f7fc8c7a4d16 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/02619936cc5f4a2cb5a8f7fc8c7a4d16 2024-12-12T19:32:38,679 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/15ea42ec201543c2abd09b2a7b9a673b 2024-12-12T19:32:38,684 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/e3992581ed774eaca0c901c4ea4321f4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/e3992581ed774eaca0c901c4ea4321f4 2024-12-12T19:32:38,689 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/7adbeb30faf443cc8edca842b91ae976 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/7adbeb30faf443cc8edca842b91ae976 2024-12-12T19:32:38,690 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b504c6cef4234ffaab46b80ed917bd1b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b504c6cef4234ffaab46b80ed917bd1b 2024-12-12T19:32:38,699 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/adfbbf51ea30449ab58677ceb156ebf9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/adfbbf51ea30449ab58677ceb156ebf9 2024-12-12T19:32:38,711 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/4a6435fc814841c8baf266be2f803a22 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/4a6435fc814841c8baf266be2f803a22 2024-12-12T19:32:38,720 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/cbda0861236945dd8a598a98714afc3f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/cbda0861236945dd8a598a98714afc3f 2024-12-12T19:32:38,721 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/614cdba6a5ce47388160d0b7bba4b2f1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/614cdba6a5ce47388160d0b7bba4b2f1 2024-12-12T19:32:38,725 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b5b694b359b44ef6b556eec99f93b78b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b5b694b359b44ef6b556eec99f93b78b 2024-12-12T19:32:38,725 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/43a3cb6f073c4b528629a4829f81c910 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/43a3cb6f073c4b528629a4829f81c910 2024-12-12T19:32:38,727 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/d7aef9e0c24744908134d68bd484696f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/d7aef9e0c24744908134d68bd484696f 2024-12-12T19:32:38,727 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/54b81f64b55941b386b514d7eea196fa to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/54b81f64b55941b386b514d7eea196fa 2024-12-12T19:32:38,733 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/de9f7997de974c15924bc84b5fc390e3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/de9f7997de974c15924bc84b5fc390e3 2024-12-12T19:32:38,735 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9b49ac1af8e140b28075902a23dd37c7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9b49ac1af8e140b28075902a23dd37c7 2024-12-12T19:32:38,739 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/da14247a4cf04d01b461cf9e545fe2b6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/da14247a4cf04d01b461cf9e545fe2b6 2024-12-12T19:32:38,745 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b38da28754cf4591a26838dd014b3e4b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/b38da28754cf4591a26838dd014b3e4b 2024-12-12T19:32:38,750 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/932d287f7ff44e2fbef5176a1aceaf2d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/932d287f7ff44e2fbef5176a1aceaf2d 2024-12-12T19:32:38,751 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8062aed84a5f4803a23807359fd3d357 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8062aed84a5f4803a23807359fd3d357 2024-12-12T19:32:38,759 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8ee69ed1e29944fbbf125cc7b4f24b95 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/8ee69ed1e29944fbbf125cc7b4f24b95 2024-12-12T19:32:38,763 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9bc35dfc884d49f4abaca8b58228007c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/9bc35dfc884d49f4abaca8b58228007c 2024-12-12T19:32:38,763 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/0a55c961d43f47e89c3b48627d2cd915 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/0a55c961d43f47e89c3b48627d2cd915 2024-12-12T19:32:38,777 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/78a1e7cc6d0f4be69b3905b27db6f55a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/78a1e7cc6d0f4be69b3905b27db6f55a 2024-12-12T19:32:38,800 DEBUG [StoreCloser-TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ea9f18f99ffd4e4ca6cd8a982e550f3d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/8fcc61c7708444638d783a1353eaf488, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce97df282b6e4693a10870c3a85a46b9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c533a685d12f4830b0348bc0ebf73a47, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/6a150395dacb43298ed8533f0399c665, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/947a2caf0f95463bb9a4bb4a2f42d01c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b9e95e0ebfb44eea8de4a4ed316fcb1c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/153aff2a404445fbb9f925d6b88ce585, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/57b2864237e24afe8ca39a76ff719c11, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/a5faadbe07ae40339f4dcc65ab01c990, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/824fc214d8124c0a85df657c3adb1fb0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/474402912cb34d2e8b52d088ccb00fdd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ba2c5e908cdd42058683e6ca34df00dc, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/7be4eb96e9bd406f8aaf4fbd4e398d1d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b53014c90f9344a8af1f371a940fddfd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e53e67affbe14ccabcc1f6787c019dd4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/bbac304451044fff8d0b2a1f4c5035f9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/d81d484da1384ca3a52ae40bd4e70744, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/fe5a74bd31594c919c0ea3d5c7a009ce, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/30e3cd75feaf4b518f8d506b3f6aad8f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e82afb77f36042bdb0bf9a85b88b7ea1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/997abcaf9ce54aa897cfa271c29f94ef, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/5b758d7e36d4483fb72b4ff010dd20aa] to archive 2024-12-12T19:32:38,804 DEBUG [StoreCloser-TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T19:32:38,832 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b9e95e0ebfb44eea8de4a4ed316fcb1c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b9e95e0ebfb44eea8de4a4ed316fcb1c 2024-12-12T19:32:38,832 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/8fcc61c7708444638d783a1353eaf488 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/8fcc61c7708444638d783a1353eaf488 2024-12-12T19:32:38,832 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c533a685d12f4830b0348bc0ebf73a47 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c533a685d12f4830b0348bc0ebf73a47 2024-12-12T19:32:38,832 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/153aff2a404445fbb9f925d6b88ce585 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/153aff2a404445fbb9f925d6b88ce585 2024-12-12T19:32:38,832 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce97df282b6e4693a10870c3a85a46b9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce97df282b6e4693a10870c3a85a46b9 2024-12-12T19:32:38,832 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/947a2caf0f95463bb9a4bb4a2f42d01c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/947a2caf0f95463bb9a4bb4a2f42d01c 2024-12-12T19:32:38,833 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/6a150395dacb43298ed8533f0399c665 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/6a150395dacb43298ed8533f0399c665 2024-12-12T19:32:38,836 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ea9f18f99ffd4e4ca6cd8a982e550f3d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ea9f18f99ffd4e4ca6cd8a982e550f3d 2024-12-12T19:32:38,837 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/a5faadbe07ae40339f4dcc65ab01c990 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/a5faadbe07ae40339f4dcc65ab01c990 2024-12-12T19:32:38,842 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/57b2864237e24afe8ca39a76ff719c11 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/57b2864237e24afe8ca39a76ff719c11 2024-12-12T19:32:38,842 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/824fc214d8124c0a85df657c3adb1fb0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/824fc214d8124c0a85df657c3adb1fb0 2024-12-12T19:32:38,843 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/474402912cb34d2e8b52d088ccb00fdd to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/474402912cb34d2e8b52d088ccb00fdd 2024-12-12T19:32:38,843 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/7be4eb96e9bd406f8aaf4fbd4e398d1d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/7be4eb96e9bd406f8aaf4fbd4e398d1d 2024-12-12T19:32:38,844 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ba2c5e908cdd42058683e6ca34df00dc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ba2c5e908cdd42058683e6ca34df00dc 2024-12-12T19:32:38,845 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/bbac304451044fff8d0b2a1f4c5035f9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/bbac304451044fff8d0b2a1f4c5035f9 2024-12-12T19:32:38,845 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e53e67affbe14ccabcc1f6787c019dd4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e53e67affbe14ccabcc1f6787c019dd4 2024-12-12T19:32:38,846 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b53014c90f9344a8af1f371a940fddfd to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/b53014c90f9344a8af1f371a940fddfd 2024-12-12T19:32:38,848 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/30e3cd75feaf4b518f8d506b3f6aad8f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/30e3cd75feaf4b518f8d506b3f6aad8f 2024-12-12T19:32:38,848 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/fe5a74bd31594c919c0ea3d5c7a009ce to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/fe5a74bd31594c919c0ea3d5c7a009ce 2024-12-12T19:32:38,848 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e82afb77f36042bdb0bf9a85b88b7ea1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e82afb77f36042bdb0bf9a85b88b7ea1 2024-12-12T19:32:38,848 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/997abcaf9ce54aa897cfa271c29f94ef to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/997abcaf9ce54aa897cfa271c29f94ef 2024-12-12T19:32:38,849 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/5b758d7e36d4483fb72b4ff010dd20aa to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/5b758d7e36d4483fb72b4ff010dd20aa 2024-12-12T19:32:38,856 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/d81d484da1384ca3a52ae40bd4e70744 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/d81d484da1384ca3a52ae40bd4e70744 2024-12-12T19:32:38,864 DEBUG [StoreCloser-TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1fc220ea582a4748872cd723c0cf5c8d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/6ccdace211214f95b74d46ad9007dd93, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9ac9572c8ef4b9e8bd77f0169e463fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9b48ea1642e4f33a1d06cb8e7650176, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/404cf232a05240faa5bf98388ca9750e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8e308e420f1b4da28cffc877bc6ef5f9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7b13b264606a470aa0786eb1f4799c18, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/59c8ea0fae684376b2bb176542b27aea, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/ca238867812343259a80d1c93d6d4901, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7268bcda2e8b416f89f9477b83cf336c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/38b5c08af0974447b8754ca5f1bc0b9c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1627466b70b94010b9a904725922995d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/b3354e136b2f49a59584f3e1dfcc1aa0, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/fed750e3cf694848b40df127bacd7ae3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7660103287c2422e8acbf811add3497b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/47c2929223dd45ad89645f72bf0b9e3e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/33c5af4694e44a719f9294561c638442, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/9bff5682d2be4461b0a6cd68c06bc330, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/5dfde6d47c124822b33efaa9cf3705b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/4b156968ff104bb5b80ed74ad834c19b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/45532d66faef4bb58076a135e7ee4f30, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/c746759c55344176ab5d855f2f5cfd18, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/0c1df61a95d14ba7ba488dc1875f3867] to archive 2024-12-12T19:32:38,867 DEBUG [StoreCloser-TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
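The same archiving now runs for the C family before the region close completes below (recovered.edits seqid written, region closed, state moved to CLOSED, table set to DISABLED). If one wanted to confirm such moves after a run, a small listing of the archive directory over the Hadoop FileSystem API would do; the sketch below reuses the NameNode address and paths from this log, which are only reachable while the test's mini-cluster is still up.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedHFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode and root directory taken from the log above; substitute your own cluster's values.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38311"), conf);
    Path archivedFamily = new Path(
        "/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/"
            + "archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C");
    // List the archived C-family HFiles with their sizes.
    for (FileStatus status : fs.listStatus(archivedFamily)) {
      System.out.printf("%s (%d bytes)%n", status.getPath().getName(), status.getLen());
    }
  }
}
```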
2024-12-12T19:32:38,888 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1fc220ea582a4748872cd723c0cf5c8d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1fc220ea582a4748872cd723c0cf5c8d 2024-12-12T19:32:38,895 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/59c8ea0fae684376b2bb176542b27aea to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/59c8ea0fae684376b2bb176542b27aea 2024-12-12T19:32:38,895 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7b13b264606a470aa0786eb1f4799c18 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7b13b264606a470aa0786eb1f4799c18 2024-12-12T19:32:38,895 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9b48ea1642e4f33a1d06cb8e7650176 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9b48ea1642e4f33a1d06cb8e7650176 2024-12-12T19:32:38,896 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9ac9572c8ef4b9e8bd77f0169e463fb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/e9ac9572c8ef4b9e8bd77f0169e463fb 2024-12-12T19:32:38,897 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/6ccdace211214f95b74d46ad9007dd93 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/6ccdace211214f95b74d46ad9007dd93 2024-12-12T19:32:38,899 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/404cf232a05240faa5bf98388ca9750e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/404cf232a05240faa5bf98388ca9750e 2024-12-12T19:32:38,899 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8e308e420f1b4da28cffc877bc6ef5f9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8e308e420f1b4da28cffc877bc6ef5f9 2024-12-12T19:32:38,907 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/ca238867812343259a80d1c93d6d4901 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/ca238867812343259a80d1c93d6d4901 2024-12-12T19:32:38,913 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1627466b70b94010b9a904725922995d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1627466b70b94010b9a904725922995d 2024-12-12T19:32:38,913 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/38b5c08af0974447b8754ca5f1bc0b9c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/38b5c08af0974447b8754ca5f1bc0b9c 2024-12-12T19:32:38,913 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7268bcda2e8b416f89f9477b83cf336c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7268bcda2e8b416f89f9477b83cf336c 2024-12-12T19:32:38,914 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/b3354e136b2f49a59584f3e1dfcc1aa0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/b3354e136b2f49a59584f3e1dfcc1aa0 2024-12-12T19:32:38,920 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/47c2929223dd45ad89645f72bf0b9e3e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/47c2929223dd45ad89645f72bf0b9e3e 2024-12-12T19:32:38,920 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/fed750e3cf694848b40df127bacd7ae3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/fed750e3cf694848b40df127bacd7ae3 2024-12-12T19:32:38,921 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7660103287c2422e8acbf811add3497b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/7660103287c2422e8acbf811add3497b 2024-12-12T19:32:38,924 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/33c5af4694e44a719f9294561c638442 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/33c5af4694e44a719f9294561c638442 2024-12-12T19:32:38,931 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/45532d66faef4bb58076a135e7ee4f30 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/45532d66faef4bb58076a135e7ee4f30 2024-12-12T19:32:38,931 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/5dfde6d47c124822b33efaa9cf3705b0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/5dfde6d47c124822b33efaa9cf3705b0 2024-12-12T19:32:38,932 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/9bff5682d2be4461b0a6cd68c06bc330 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/9bff5682d2be4461b0a6cd68c06bc330 2024-12-12T19:32:38,932 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/4b156968ff104bb5b80ed74ad834c19b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/4b156968ff104bb5b80ed74ad834c19b 2024-12-12T19:32:38,933 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/0c1df61a95d14ba7ba488dc1875f3867 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/0c1df61a95d14ba7ba488dc1875f3867 2024-12-12T19:32:38,933 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/c746759c55344176ab5d855f2f5cfd18 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/c746759c55344176ab5d855f2f5cfd18 2024-12-12T19:32:38,959 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/recovered.edits/371.seqid, newMaxSeqId=371, maxSeqId=1 2024-12-12T19:32:38,966 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5. 2024-12-12T19:32:38,966 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1635): Region close journal for 6ffb87fb734b5d4ed7499f1da86f79f5: 2024-12-12T19:32:38,973 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(170): Closed 6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:38,976 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=6ffb87fb734b5d4ed7499f1da86f79f5, regionState=CLOSED 2024-12-12T19:32:38,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-12T19:32:38,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseRegionProcedure 6ffb87fb734b5d4ed7499f1da86f79f5, server=4c9c438b6eeb,42689,1734031923038 in 1.5560 sec 2024-12-12T19:32:39,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=27 2024-12-12T19:32:39,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=27, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6ffb87fb734b5d4ed7499f1da86f79f5, UNASSIGN in 1.5700 sec 2024-12-12T19:32:39,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-12T19:32:39,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5860 sec 2024-12-12T19:32:39,020 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031959020"}]},"ts":"1734031959020"} 2024-12-12T19:32:39,023 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T19:32:39,040 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees 
to state=DISABLED 2024-12-12T19:32:39,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6430 sec 2024-12-12T19:32:39,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T19:32:39,530 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-12T19:32:39,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T19:32:39,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:39,547 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:39,551 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=30, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T19:32:39,569 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:39,604 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/recovered.edits] 2024-12-12T19:32:39,621 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/6952b01b89c548a0864ec50d9dd1f82b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/6952b01b89c548a0864ec50d9dd1f82b 2024-12-12T19:32:39,622 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/3ca609f0be734276b23410450f86e935 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/3ca609f0be734276b23410450f86e935 2024-12-12T19:32:39,622 DEBUG [HFileArchiver-4 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/79b03f4ca9f44e5397bb131ec917cb8a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/79b03f4ca9f44e5397bb131ec917cb8a 2024-12-12T19:32:39,623 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/a38f34575a0a4f9abb1ccb22a3694303 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/A/a38f34575a0a4f9abb1ccb22a3694303 2024-12-12T19:32:39,636 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c3f9eb888fe74eaca4021f33c746d26e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/c3f9eb888fe74eaca4021f33c746d26e 2024-12-12T19:32:39,636 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce876b2cf9ad4551be4abe51ef805c9b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/ce876b2cf9ad4551be4abe51ef805c9b 2024-12-12T19:32:39,636 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e13bfa69f9d14c519af45cbcad7f9267 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/e13bfa69f9d14c519af45cbcad7f9267 2024-12-12T19:32:39,640 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/9abba5aa162f4edfaa2b9a3b5a51b04c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/B/9abba5aa162f4edfaa2b9a3b5a51b04c 2024-12-12T19:32:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T19:32:39,660 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/013fc8b289d34ace99d53a6205d096f3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/013fc8b289d34ace99d53a6205d096f3 2024-12-12T19:32:39,660 DEBUG [HFileArchiver-4 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1d43dab1a0da46a887f3785901fbe0f1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/1d43dab1a0da46a887f3785901fbe0f1 2024-12-12T19:32:39,660 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/a431f7ca2aab4a74983b67e57d965713 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/a431f7ca2aab4a74983b67e57d965713 2024-12-12T19:32:39,660 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/C/8c9ac8bd1f894bc8b4da5cbd42cf52a3 2024-12-12T19:32:39,680 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/recovered.edits/371.seqid to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5/recovered.edits/371.seqid 2024-12-12T19:32:39,683 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/6ffb87fb734b5d4ed7499f1da86f79f5 2024-12-12T19:32:39,683 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T19:32:39,709 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=30, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:39,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-12T19:32:39,743 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T19:32:39,833 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T19:32:39,835 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=30, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:39,835 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
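The DISABLE and DELETE procedures traced in this part of the log (pid=26 and pid=30) are what the master executes when a client drops the test table. A minimal client-side sketch of the equivalent Admin calls, assuming an hbase-site.xml on the classpath and the table name used by this test; the connection handling shown here is illustrative and is not taken from the test code itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // A table must be disabled before it can be deleted; disableTable drives the
            // DisableTableProcedure (region close plus hbase:meta state update) seen above.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            // deleteTable drives the DeleteTableProcedure, which archives the region
            // directories under .../archive/data/<namespace>/<table>/ and removes the meta rows.
            admin.deleteTable(table);
        }
    }
}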
2024-12-12T19:32:39,835 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734031959835"}]},"ts":"9223372036854775807"} 2024-12-12T19:32:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T19:32:39,867 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T19:32:39,867 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6ffb87fb734b5d4ed7499f1da86f79f5, NAME => 'TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T19:32:39,867 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T19:32:39,868 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734031959867"}]},"ts":"9223372036854775807"} 2024-12-12T19:32:39,888 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T19:32:39,926 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=30, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:39,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 391 msec 2024-12-12T19:32:40,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T19:32:40,163 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-12T19:32:40,186 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=244 (was 218) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x77ea215e-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;4c9c438b6eeb:42689-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1315904107_22 at /127.0.0.1:57990 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x77ea215e-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x77ea215e-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x77ea215e-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1254 (was 1059) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9940 (was 9951) 2024-12-12T19:32:40,203 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=244, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=1254, ProcessCount=11, AvailableMemoryMB=9938 2024-12-12T19:32:40,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T19:32:40,206 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:32:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:40,208 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:32:40,209 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:40,209 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 31 2024-12-12T19:32:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-12T19:32:40,210 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:32:40,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741920_1096 (size=963) 2024-12-12T19:32:40,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-12T19:32:40,523 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-12T19:32:40,636 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:32:40,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741921_1097 (size=53) 2024-12-12T19:32:40,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-12T19:32:41,099 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:41,099 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2c602e7d1968409c889fb12ef89e5146, disabling compactions & flushes 2024-12-12T19:32:41,099 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:41,099 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:41,099 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. after waiting 0 ms 2024-12-12T19:32:41,100 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:41,100 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
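The create request logged above spells out the full descriptor for the new TestAcidGuarantees table: three column families A, B and C with a single version, ROW bloom filters and 64 KB blocks, plus the table-level metadata attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A hedged sketch of how such a descriptor is typically assembled with the HBase 2.x builder API; the 128 KB memstore flush size is what triggers the TableDescriptorChecker warning logged at 19:32:40,205, and the exact flush-size override used by the test is an assumption here:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidTableDescriptorSketch {
    static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                .build();
    }

    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata attribute echoed in the create request above.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                // Small flush size (128 KB); provokes the MEMSTORE_FLUSHSIZE warning.
                .setMemStoreFlushSize(128 * 1024)
                .setColumnFamily(family("A"))
                .setColumnFamily(family("B"))
                .setColumnFamily(family("C"))
                .build();
    }
}

Passing the resulting descriptor to Admin.createTable(...) issues the same CreateTableProcedure (pid=31) whose progress the RPC handler keeps polling in the surrounding "Checking to see if procedure is done" lines.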
2024-12-12T19:32:41,100 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:41,101 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:32:41,101 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734031961101"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734031961101"}]},"ts":"1734031961101"} 2024-12-12T19:32:41,104 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T19:32:41,105 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:32:41,106 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031961105"}]},"ts":"1734031961105"} 2024-12-12T19:32:41,107 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T19:32:41,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, ASSIGN}] 2024-12-12T19:32:41,216 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, ASSIGN 2024-12-12T19:32:41,217 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:32:41,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-12T19:32:41,369 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:41,374 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:41,527 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:41,537 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
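The StoreOpener lines that follow show each family coming up with a CompactingMemStore whose in-memory compactor is ADAPTIVE, inherited from that table-level attribute. The same policy can also be requested per column family; a small sketch under the assumption that the standard builder API is used, with the family name 'A' chosen only as an example:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemstoreFamilySketch {
    public static ColumnFamilyDescriptor adaptiveFamily() {
        // Per-family counterpart of the table-level
        // 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute: the
        // regionserver opens the store with a CompactingMemStore using the
        // adaptive in-memory compaction policy.
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
    }
}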
2024-12-12T19:32:41,537 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:32:41,538 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,538 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:41,538 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,538 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,540 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,541 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:41,542 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c602e7d1968409c889fb12ef89e5146 columnFamilyName A 2024-12-12T19:32:41,542 DEBUG [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:41,542 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(327): Store=2c602e7d1968409c889fb12ef89e5146/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:41,543 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,548 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:41,549 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c602e7d1968409c889fb12ef89e5146 columnFamilyName B 2024-12-12T19:32:41,549 DEBUG [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:41,550 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(327): Store=2c602e7d1968409c889fb12ef89e5146/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:41,550 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,552 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:41,552 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c602e7d1968409c889fb12ef89e5146 columnFamilyName C 2024-12-12T19:32:41,552 DEBUG [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:41,553 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(327): Store=2c602e7d1968409c889fb12ef89e5146/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:41,553 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:41,554 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,555 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,558 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:32:41,560 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:41,562 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:32:41,563 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 2c602e7d1968409c889fb12ef89e5146; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63499771, jitterRate=-0.05377967655658722}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:32:41,564 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:41,565 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., pid=33, masterSystemTime=1734031961527 2024-12-12T19:32:41,567 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:41,567 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:41,567 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:41,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-12T19:32:41,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 in 195 msec 2024-12-12T19:32:41,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-12T19:32:41,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, ASSIGN in 359 msec 2024-12-12T19:32:41,573 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:32:41,573 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031961573"}]},"ts":"1734031961573"} 2024-12-12T19:32:41,575 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T19:32:41,612 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:32:41,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.4070 sec 2024-12-12T19:32:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-12T19:32:42,334 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 31 completed 2024-12-12T19:32:42,336 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26401a5f to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@407e6b5c 2024-12-12T19:32:42,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb305fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:42,427 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:42,445 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:42,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T19:32:42,465 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39972, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:32:42,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T19:32:42,477 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:32:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T19:32:42,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741922_1098 (size=999) 2024-12-12T19:32:42,947 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T19:32:42,947 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T19:32:42,952 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:32:42,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, REOPEN/MOVE}] 2024-12-12T19:32:42,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, REOPEN/MOVE 2024-12-12T19:32:42,969 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:42,971 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:32:42,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:43,124 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:43,125 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,125 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:32:43,125 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 2c602e7d1968409c889fb12ef89e5146, disabling compactions & flushes 2024-12-12T19:32:43,125 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:43,125 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:43,125 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. after waiting 0 ms 2024-12-12T19:32:43,125 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:43,130 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T19:32:43,131 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:43,131 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:43,131 WARN [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionServer(3786): Not adding moved region record: 2c602e7d1968409c889fb12ef89e5146 to self. 2024-12-12T19:32:43,136 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,137 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=CLOSED 2024-12-12T19:32:43,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-12T19:32:43,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 in 166 msec 2024-12-12T19:32:43,141 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, REOPEN/MOVE; state=CLOSED, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=true 2024-12-12T19:32:43,292 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:43,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=36, state=RUNNABLE; OpenRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:32:43,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:43,452 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:43,452 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7285): Opening region: {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:32:43,453 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,453 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:32:43,453 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7327): checking encryption for 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,453 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7330): checking classloading for 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,456 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,457 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:43,464 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c602e7d1968409c889fb12ef89e5146 columnFamilyName A 2024-12-12T19:32:43,466 DEBUG [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:43,467 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(327): Store=2c602e7d1968409c889fb12ef89e5146/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:43,468 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,469 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:43,469 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c602e7d1968409c889fb12ef89e5146 columnFamilyName B 2024-12-12T19:32:43,469 DEBUG [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:43,470 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(327): Store=2c602e7d1968409c889fb12ef89e5146/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:43,470 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,471 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:32:43,472 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c602e7d1968409c889fb12ef89e5146 columnFamilyName C 2024-12-12T19:32:43,472 DEBUG [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:43,472 INFO [StoreOpener-2c602e7d1968409c889fb12ef89e5146-1 {}] regionserver.HStore(327): Store=2c602e7d1968409c889fb12ef89e5146/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:32:43,473 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:43,474 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,475 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,477 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:32:43,482 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1085): writing seq id for 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:43,484 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1102): Opened 2c602e7d1968409c889fb12ef89e5146; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60815165, jitterRate=-0.0937834233045578}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:32:43,486 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1001): Region open journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:43,487 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., pid=38, masterSystemTime=1734031963448 2024-12-12T19:32:43,489 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:43,489 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:43,490 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=OPEN, openSeqNum=5, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:43,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=36 2024-12-12T19:32:43,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=36, state=SUCCESS; OpenRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 in 196 msec 2024-12-12T19:32:43,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-12T19:32:43,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, REOPEN/MOVE in 527 msec 2024-12-12T19:32:43,499 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-12T19:32:43,499 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 545 msec 2024-12-12T19:32:43,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 1.0200 sec 2024-12-12T19:32:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T19:32:43,529 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-12-12T19:32:43,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,570 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e3a4420 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ebda6ad 2024-12-12T19:32:43,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,642 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-12-12T19:32:43,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,689 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 
127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-12-12T19:32:43,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,713 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-12-12T19:32:43,769 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,772 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-12-12T19:32:43,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,835 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-12T19:32:43,857 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,859 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-12-12T19:32:43,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,911 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c1ec7ee to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63e87c8 2024-12-12T19:32:43,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a027db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:32:43,950 DEBUG 
[hconnection-0x354d3b55-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:43,951 DEBUG [hconnection-0x2cb28f91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:43,953 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:43,963 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:43,992 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:43,991 DEBUG [hconnection-0x52ee7987-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:43,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees 2024-12-12T19:32:43,994 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:43,994 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:43,996 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:43,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:43,999 DEBUG [hconnection-0xc353fb5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:44,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:44,003 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:44,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:44,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:44,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:44,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:44,016 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:44,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:44,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:44,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:44,043 DEBUG [hconnection-0x1752ebcf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:44,045 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:44,056 DEBUG [hconnection-0x507aa03d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:44,059 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:44,063 DEBUG [hconnection-0x5baca351-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:44,066 DEBUG [hconnection-0x7acfe2ca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:44,068 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:44,069 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:44,083 DEBUG [hconnection-0x7b48ac02-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:32:44,085 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:32:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:44,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032024104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032024123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032024129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032024100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,147 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:44,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032024128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212445eb9f37dd74b9d912d4f061168533b_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031964013/Put/seqid=0 2024-12-12T19:32:44,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032024235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032024236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032024247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741923_1099 (size=12154) 2024-12-12T19:32:44,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032024254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,265 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:44,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032024268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,277 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212445eb9f37dd74b9d912d4f061168533b_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212445eb9f37dd74b9d912d4f061168533b_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:44,278 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/99608738123b4a11a5887f145f01b50a, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:44,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/99608738123b4a11a5887f145f01b50a is 175, key is test_row_0/A:col10/1734031964013/Put/seqid=0 2024-12-12T19:32:44,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:44,310 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:44,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:44,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741924_1100 (size=30955) 2024-12-12T19:32:44,349 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/99608738123b4a11a5887f145f01b50a 2024-12-12T19:32:44,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/06c29e6bbea34ad8bb844967201e21f9 is 50, key is test_row_0/B:col10/1734031964013/Put/seqid=0 2024-12-12T19:32:44,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032024444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032024452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741925_1101 (size=12001) 2024-12-12T19:32:44,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/06c29e6bbea34ad8bb844967201e21f9 2024-12-12T19:32:44,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032024460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:44,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:44,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:44,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032024479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032024479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/0e29d29a60b34f8cbaf00c63c4f6e291 is 50, key is test_row_0/C:col10/1734031964013/Put/seqid=0 2024-12-12T19:32:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741926_1102 (size=12001) 2024-12-12T19:32:44,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/0e29d29a60b34f8cbaf00c63c4f6e291 2024-12-12T19:32:44,584 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T19:32:44,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/99608738123b4a11a5887f145f01b50a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a 2024-12-12T19:32:44,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a, entries=150, sequenceid=16, filesize=30.2 K 2024-12-12T19:32:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/06c29e6bbea34ad8bb844967201e21f9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/06c29e6bbea34ad8bb844967201e21f9 2024-12-12T19:32:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:44,630 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:44,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:44,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:44,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/06c29e6bbea34ad8bb844967201e21f9, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T19:32:44,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/0e29d29a60b34f8cbaf00c63c4f6e291 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0e29d29a60b34f8cbaf00c63c4f6e291 2024-12-12T19:32:44,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:44,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0e29d29a60b34f8cbaf00c63c4f6e291, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T19:32:44,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 2c602e7d1968409c889fb12ef89e5146 in 663ms, sequenceid=16, compaction requested=false 2024-12-12T19:32:44,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:44,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:44,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T19:32:44,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:44,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:44,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:44,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:44,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:44,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:44,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121289cde2eff09144efbdc793443d73c815_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031964096/Put/seqid=0 2024-12-12T19:32:44,793 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:44,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:44,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:44,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032024789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032024790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741927_1103 (size=14594) 2024-12-12T19:32:44,853 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:44,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032024843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032024849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032024849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,860 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121289cde2eff09144efbdc793443d73c815_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121289cde2eff09144efbdc793443d73c815_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:44,863 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/bce9068fcf9b44ad98977f2f0e7bbc3d, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:44,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/bce9068fcf9b44ad98977f2f0e7bbc3d is 175, key is test_row_0/A:col10/1734031964096/Put/seqid=0 2024-12-12T19:32:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741928_1104 (size=39549) 2024-12-12T19:32:44,950 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:44,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:44,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:44,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032024953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032024963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032024964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:44,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:44,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032024963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,106 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:45,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:45,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:45,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:45,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032025157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032025174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032025176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032025176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:45,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:45,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,331 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/bce9068fcf9b44ad98977f2f0e7bbc3d 2024-12-12T19:32:45,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032025351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/d609d8a023df4093b835526bfb7917b1 is 50, key is test_row_0/B:col10/1734031964096/Put/seqid=0 2024-12-12T19:32:45,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741929_1105 (size=12001) 2024-12-12T19:32:45,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:45,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:45,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032025460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032025483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032025487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032025487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,603 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:45,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:45,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,758 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:45,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:45,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:45,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/d609d8a023df4093b835526bfb7917b1 2024-12-12T19:32:45,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f710db0f8b2a4bffab8e9ad326b35b5c is 50, key is test_row_0/C:col10/1734031964096/Put/seqid=0 2024-12-12T19:32:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741930_1106 (size=12001) 2024-12-12T19:32:45,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:45,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:45,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:45,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:45,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:45,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032025967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032025997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:45,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032025998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032026007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:46,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:46,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:46,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:46,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:46,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:46,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:46,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:46,230 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:46,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:46,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:46,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:46,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:46,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f710db0f8b2a4bffab8e9ad326b35b5c 2024-12-12T19:32:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/bce9068fcf9b44ad98977f2f0e7bbc3d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d 2024-12-12T19:32:46,316 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d, entries=200, sequenceid=43, filesize=38.6 K 2024-12-12T19:32:46,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/d609d8a023df4093b835526bfb7917b1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/d609d8a023df4093b835526bfb7917b1 2024-12-12T19:32:46,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/d609d8a023df4093b835526bfb7917b1, entries=150, sequenceid=43, filesize=11.7 K 2024-12-12T19:32:46,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f710db0f8b2a4bffab8e9ad326b35b5c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f710db0f8b2a4bffab8e9ad326b35b5c 2024-12-12T19:32:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f710db0f8b2a4bffab8e9ad326b35b5c, entries=150, sequenceid=43, filesize=11.7 K 2024-12-12T19:32:46,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=53.67 KB/54960 for 2c602e7d1968409c889fb12ef89e5146 in 1595ms, sequenceid=43, compaction requested=false 2024-12-12T19:32:46,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:46,385 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T19:32:46,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:46,389 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:32:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a086b330102546a9ac074a21aeabc9cd_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_1/A:col10/1734031964804/Put/seqid=0 2024-12-12T19:32:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
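All of the requests in this stretch are served by handlers 0 through 2 of the default FIFO pool on port 42689, which suggests a very small RPC handler pool for this mini cluster. A hedged sketch of the standard knob that sizes that pool (the value 3 is an assumption chosen only to mirror the three handlers visible here):

// Hedged illustration: the handler=0..2 thread names above come from the region
// server's default RPC handler pool. The standard property below sizes that pool;
// the value 3 is an assumption for illustration, not read from this cluster's config.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerCountSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3); // assumed value for illustration
        System.out.println("rpc handlers = " + conf.getInt("hbase.regionserver.handler.count", 30));
    }
}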
2024-12-12T19:32:46,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741931_1107 (size=9714) 2024-12-12T19:32:46,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] 
regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146
2024-12-12T19:32:46,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing
2024-12-12T19:32:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:46,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032026720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:46,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032026835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:46,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:46,908 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a086b330102546a9ac074a21aeabc9cd_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a086b330102546a9ac074a21aeabc9cd_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:46,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/91e35e09cd924ffa8d79216341fcee8e, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:46,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/91e35e09cd924ffa8d79216341fcee8e is 175, key is test_row_1/A:col10/1734031964804/Put/seqid=0 2024-12-12T19:32:46,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741932_1108 (size=22361) 2024-12-12T19:32:46,959 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/91e35e09cd924ffa8d79216341fcee8e 2024-12-12T19:32:46,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:46,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032026974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/bd319ac88ee24884b3ae886e245bb89a is 50, key is test_row_1/B:col10/1734031964804/Put/seqid=0 2024-12-12T19:32:47,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:47,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032027009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:47,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032027010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:47,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032027023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741933_1109 (size=9657) 2024-12-12T19:32:47,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032027049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:47,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032027364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,444 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/bd319ac88ee24884b3ae886e245bb89a 2024-12-12T19:32:47,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/b4aeaab8938644d18c8e9a6955c56a11 is 50, key is test_row_1/C:col10/1734031964804/Put/seqid=0 2024-12-12T19:32:47,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741934_1110 (size=9657) 2024-12-12T19:32:47,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:47,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032027871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:47,974 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/b4aeaab8938644d18c8e9a6955c56a11 2024-12-12T19:32:48,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/91e35e09cd924ffa8d79216341fcee8e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e 2024-12-12T19:32:48,010 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e, entries=100, sequenceid=54, filesize=21.8 K 2024-12-12T19:32:48,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/bd319ac88ee24884b3ae886e245bb89a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/bd319ac88ee24884b3ae886e245bb89a 2024-12-12T19:32:48,046 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/bd319ac88ee24884b3ae886e245bb89a, entries=100, sequenceid=54, filesize=9.4 K 2024-12-12T19:32:48,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/b4aeaab8938644d18c8e9a6955c56a11 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b4aeaab8938644d18c8e9a6955c56a11 2024-12-12T19:32:48,064 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b4aeaab8938644d18c8e9a6955c56a11, entries=100, sequenceid=54, filesize=9.4 K 2024-12-12T19:32:48,065 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2c602e7d1968409c889fb12ef89e5146 in 1676ms, sequenceid=54, compaction requested=true 2024-12-12T19:32:48,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:48,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:48,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-12T19:32:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-12T19:32:48,072 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-12T19:32:48,072 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0710 sec 2024-12-12T19:32:48,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees in 4.0820 sec 2024-12-12T19:32:48,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T19:32:48,128 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-12T19:32:48,137 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:48,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-12T19:32:48,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T19:32:48,140 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:48,141 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:48,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:48,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T19:32:48,300 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:48,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T19:32:48,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:48,301 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:32:48,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:48,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:48,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:48,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:48,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:48,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:48,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124df5fef78bcb430b9d0ac4d4f0c975d4_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031966692/Put/seqid=0 2024-12-12T19:32:48,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741935_1111 (size=12154) 
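Procedures pid=39/40 above complete a client requested flush and pid=41/42 immediately start the next one; the earlier "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" line is the master side of an Admin flush call. A minimal client side sketch of such a call (the ZooKeeper quorum is an assumed placeholder, not a value taken from this log):

// Minimal sketch of the client call that shows up on the master as
// "flush TestAcidGuarantees" and is executed as FlushTableProcedure/FlushRegionProcedure.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost"); // assumed placeholder
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // The master drives the actual region flushes through the procedure
            // framework, as seen in the pid=39..42 entries around this point.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}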
2024-12-12T19:32:48,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T19:32:48,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T19:32:48,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:48,850 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124df5fef78bcb430b9d0ac4d4f0c975d4_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124df5fef78bcb430b9d0ac4d4f0c975d4_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:48,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e42aae612ebe4db4bb8b523cb9b3cf77, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:48,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e42aae612ebe4db4bb8b523cb9b3cf77 is 175, key is test_row_0/A:col10/1734031966692/Put/seqid=0 2024-12-12T19:32:48,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741936_1112 (size=30955) 2024-12-12T19:32:48,913 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e42aae612ebe4db4bb8b523cb9b3cf77 2024-12-12T19:32:48,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/c58d6c8fcc96437e9ce3f2e40c1900b7 is 50, key is test_row_0/B:col10/1734031966692/Put/seqid=0 2024-12-12T19:32:48,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:48,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032028943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:48,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741937_1113 (size=12001) 2024-12-12T19:32:48,970 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/c58d6c8fcc96437e9ce3f2e40c1900b7 2024-12-12T19:32:48,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/cfdaf60449f64f59a78d36bc7fab5b1a is 50, key is test_row_0/C:col10/1734031966692/Put/seqid=0 2024-12-12T19:32:48,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:48,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032028996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:48,999 DEBUG [Thread-518 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:49,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:49,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032029021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:49,025 DEBUG [Thread-520 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:49,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:49,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032029025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:49,029 DEBUG [Thread-516 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741938_1114 (size=12001) 2024-12-12T19:32:49,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032029040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:49,044 DEBUG [Thread-522 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4201 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., 
hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:49,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032029049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:49,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T19:32:49,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:49,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032029255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:49,442 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/cfdaf60449f64f59a78d36bc7fab5b1a 2024-12-12T19:32:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e42aae612ebe4db4bb8b523cb9b3cf77 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77 2024-12-12T19:32:49,467 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77, entries=150, sequenceid=79, filesize=30.2 K 2024-12-12T19:32:49,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/c58d6c8fcc96437e9ce3f2e40c1900b7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c58d6c8fcc96437e9ce3f2e40c1900b7 2024-12-12T19:32:49,486 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c58d6c8fcc96437e9ce3f2e40c1900b7, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T19:32:49,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/cfdaf60449f64f59a78d36bc7fab5b1a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/cfdaf60449f64f59a78d36bc7fab5b1a 2024-12-12T19:32:49,519 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/cfdaf60449f64f59a78d36bc7fab5b1a, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T19:32:49,522 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2c602e7d1968409c889fb12ef89e5146 in 1220ms, sequenceid=79, compaction requested=true 2024-12-12T19:32:49,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:49,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:49,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-12T19:32:49,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-12T19:32:49,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-12T19:32:49,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3890 sec 2024-12-12T19:32:49,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 1.4020 sec 2024-12-12T19:32:49,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:49,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:49,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:49,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:49,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:49,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:49,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-12T19:32:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:49,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a404ddfb3ce7481da117b9a92ae7569d_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:49,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741939_1115 (size=14594) 2024-12-12T19:32:49,755 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:49,782 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a404ddfb3ce7481da117b9a92ae7569d_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a404ddfb3ce7481da117b9a92ae7569d_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:49,786 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/a73bc7b5df06460e931085dc2ea02af1, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:49,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/a73bc7b5df06460e931085dc2ea02af1 is 175, key is test_row_0/A:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:49,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741940_1116 (size=39549) 2024-12-12T19:32:49,840 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/a73bc7b5df06460e931085dc2ea02af1 2024-12-12T19:32:49,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:49,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032029889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:49,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/dfd4ba19429e4c04a5261a92ddcce1dd is 50, key is test_row_0/B:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:49,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741941_1117 (size=12001) 2024-12-12T19:32:49,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/dfd4ba19429e4c04a5261a92ddcce1dd 2024-12-12T19:32:50,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032030000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/0c20e14fa3224017abd7fbd33cb1260a is 50, key is test_row_0/C:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:50,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741942_1118 (size=12001) 2024-12-12T19:32:50,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/0c20e14fa3224017abd7fbd33cb1260a 2024-12-12T19:32:50,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/a73bc7b5df06460e931085dc2ea02af1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1 2024-12-12T19:32:50,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1, entries=200, sequenceid=91, filesize=38.6 K 2024-12-12T19:32:50,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/dfd4ba19429e4c04a5261a92ddcce1dd as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dfd4ba19429e4c04a5261a92ddcce1dd 2024-12-12T19:32:50,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dfd4ba19429e4c04a5261a92ddcce1dd, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:32:50,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/0c20e14fa3224017abd7fbd33cb1260a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0c20e14fa3224017abd7fbd33cb1260a 2024-12-12T19:32:50,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0c20e14fa3224017abd7fbd33cb1260a, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:32:50,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2c602e7d1968409c889fb12ef89e5146 in 492ms, sequenceid=91, compaction requested=true 2024-12-12T19:32:50,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:50,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:50,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:50,089 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T19:32:50,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:50,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:50,090 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T19:32:50,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:50,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:50,116 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 163369 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T19:32:50,116 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/A is initiating minor compaction (all files) 2024-12-12T19:32:50,116 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/A in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:50,116 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=159.5 K 2024-12-12T19:32:50,117 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,117 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1] 2024-12-12T19:32:50,119 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99608738123b4a11a5887f145f01b50a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734031964001 2024-12-12T19:32:50,119 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57661 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T19:32:50,119 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/B is initiating minor compaction (all files) 2024-12-12T19:32:50,119 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/B in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:50,119 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/06c29e6bbea34ad8bb844967201e21f9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/d609d8a023df4093b835526bfb7917b1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/bd319ac88ee24884b3ae886e245bb89a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c58d6c8fcc96437e9ce3f2e40c1900b7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dfd4ba19429e4c04a5261a92ddcce1dd] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=56.3 K 2024-12-12T19:32:50,120 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting bce9068fcf9b44ad98977f2f0e7bbc3d, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1734031964096 2024-12-12T19:32:50,120 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 06c29e6bbea34ad8bb844967201e21f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734031964001 2024-12-12T19:32:50,120 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91e35e09cd924ffa8d79216341fcee8e, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734031964787 2024-12-12T19:32:50,120 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d609d8a023df4093b835526bfb7917b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1734031964096 2024-12-12T19:32:50,121 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bd319ac88ee24884b3ae886e245bb89a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734031964787 2024-12-12T19:32:50,121 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e42aae612ebe4db4bb8b523cb9b3cf77, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734031966681 2024-12-12T19:32:50,121 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c58d6c8fcc96437e9ce3f2e40c1900b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734031966681 2024-12-12T19:32:50,121 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a73bc7b5df06460e931085dc2ea02af1, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031968883 2024-12-12T19:32:50,124 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting dfd4ba19429e4c04a5261a92ddcce1dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=91, earliestPutTs=1734031968883 2024-12-12T19:32:50,227 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#B#compaction#96 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:50,227 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/856472a57c2f4caaadcfed9d73835a56 is 50, key is test_row_0/B:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:50,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:50,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:32:50,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:50,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:50,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:50,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:50,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:50,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:50,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T19:32:50,248 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-12T19:32:50,250 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:50,254 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:50,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-12T19:32:50,256 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:50,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:50,262 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:50,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:50,272 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212ac6743fdc7da4929bb77b094c4f22ab5_2c602e7d1968409c889fb12ef89e5146 store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:50,276 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212ac6743fdc7da4929bb77b094c4f22ab5_2c602e7d1968409c889fb12ef89e5146, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:50,276 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ac6743fdc7da4929bb77b094c4f22ab5_2c602e7d1968409c889fb12ef89e5146 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:50,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741943_1119 (size=12173) 2024-12-12T19:32:50,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121299143ae49cb346ffa444e150d605ed39_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031969887/Put/seqid=0 2024-12-12T19:32:50,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:50,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032030309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741944_1120 (size=4469) 2024-12-12T19:32:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:50,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741945_1121 (size=14594) 2024-12-12T19:32:50,415 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:50,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:50,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032030420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:50,576 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:50,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:50,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:50,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:50,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032030631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,697 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/856472a57c2f4caaadcfed9d73835a56 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/856472a57c2f4caaadcfed9d73835a56 2024-12-12T19:32:50,706 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/B of 2c602e7d1968409c889fb12ef89e5146 into 856472a57c2f4caaadcfed9d73835a56(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:50,706 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:50,707 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/B, priority=11, startTime=1734031970089; duration=0sec 2024-12-12T19:32:50,707 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:50,707 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:B 2024-12-12T19:32:50,707 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T19:32:50,710 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57661 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T19:32:50,710 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/C is initiating minor compaction (all files) 2024-12-12T19:32:50,710 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/C in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,710 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0e29d29a60b34f8cbaf00c63c4f6e291, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f710db0f8b2a4bffab8e9ad326b35b5c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b4aeaab8938644d18c8e9a6955c56a11, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/cfdaf60449f64f59a78d36bc7fab5b1a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0c20e14fa3224017abd7fbd33cb1260a] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=56.3 K 2024-12-12T19:32:50,711 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e29d29a60b34f8cbaf00c63c4f6e291, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734031964001 2024-12-12T19:32:50,711 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f710db0f8b2a4bffab8e9ad326b35b5c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1734031964096 2024-12-12T19:32:50,712 DEBUG 
[RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b4aeaab8938644d18c8e9a6955c56a11, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734031964787 2024-12-12T19:32:50,712 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting cfdaf60449f64f59a78d36bc7fab5b1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734031966681 2024-12-12T19:32:50,713 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c20e14fa3224017abd7fbd33cb1260a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031968883 2024-12-12T19:32:50,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:50,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:50,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:50,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,754 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#A#compaction#97 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:50,756 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ca2f78e1dc084ab5b29953f72fb31ed1 is 175, key is test_row_0/A:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:50,764 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#C#compaction#99 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:50,765 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/b642c18affd9420bac19fe1eb39ad308 is 50, key is test_row_0/C:col10/1734031969586/Put/seqid=0 2024-12-12T19:32:50,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741946_1122 (size=31127) 2024-12-12T19:32:50,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741947_1123 (size=12173) 2024-12-12T19:32:50,782 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:50,795 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121299143ae49cb346ffa444e150d605ed39_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121299143ae49cb346ffa444e150d605ed39_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:50,803 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/43ed2f36cf934b7eab03e9720fd053af, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:50,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/43ed2f36cf934b7eab03e9720fd053af is 175, key is test_row_0/A:col10/1734031969887/Put/seqid=0 2024-12-12T19:32:50,815 INFO 
[master/4c9c438b6eeb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-12T19:32:50,815 INFO [master/4c9c438b6eeb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-12T19:32:50,836 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/b642c18affd9420bac19fe1eb39ad308 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b642c18affd9420bac19fe1eb39ad308 2024-12-12T19:32:50,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741948_1124 (size=39549) 2024-12-12T19:32:50,853 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/C of 2c602e7d1968409c889fb12ef89e5146 into b642c18affd9420bac19fe1eb39ad308(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:50,853 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:50,853 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/C, priority=11, startTime=1734031970090; duration=0sec 2024-12-12T19:32:50,853 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:50,853 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:C 2024-12-12T19:32:50,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:50,888 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:50,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:50,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:50,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:50,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:50,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:50,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:50,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032030943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,049 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:51,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:51,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:51,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,198 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ca2f78e1dc084ab5b29953f72fb31ed1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ca2f78e1dc084ab5b29953f72fb31ed1 2024-12-12T19:32:51,205 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/A of 2c602e7d1968409c889fb12ef89e5146 into ca2f78e1dc084ab5b29953f72fb31ed1(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 1sec to execute. 2024-12-12T19:32:51,205 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:51,205 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/A, priority=11, startTime=1734031970089; duration=1sec 2024-12-12T19:32:51,206 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:51,206 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:A 2024-12-12T19:32:51,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:51,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,241 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/43ed2f36cf934b7eab03e9720fd053af 2024-12-12T19:32:51,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/20f786bcfa964bb9bc468d943eeafe93 is 50, key is test_row_0/B:col10/1734031969887/Put/seqid=0 2024-12-12T19:32:51,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741949_1125 (size=12001) 2024-12-12T19:32:51,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/20f786bcfa964bb9bc468d943eeafe93 2024-12-12T19:32:51,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/e1ecc442f2984271a10285a1344b3d1f is 50, key is test_row_0/C:col10/1734031969887/Put/seqid=0 2024-12-12T19:32:51,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:51,369 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,374 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:51,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:51,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:51,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741950_1126 (size=12001) 2024-12-12T19:32:51,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:51,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032031452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,535 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:51,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:51,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:51,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:51,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:51,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:51,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/e1ecc442f2984271a10285a1344b3d1f 2024-12-12T19:32:51,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/43ed2f36cf934b7eab03e9720fd053af as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af 2024-12-12T19:32:51,851 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:51,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:51,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:51,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:51,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:51,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:51,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af, entries=200, sequenceid=116, filesize=38.6 K 2024-12-12T19:32:51,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/20f786bcfa964bb9bc468d943eeafe93 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/20f786bcfa964bb9bc468d943eeafe93 2024-12-12T19:32:51,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/20f786bcfa964bb9bc468d943eeafe93, entries=150, sequenceid=116, filesize=11.7 K 2024-12-12T19:32:51,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/e1ecc442f2984271a10285a1344b3d1f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/e1ecc442f2984271a10285a1344b3d1f 2024-12-12T19:32:51,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/e1ecc442f2984271a10285a1344b3d1f, entries=150, sequenceid=116, filesize=11.7 K 2024-12-12T19:32:51,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2c602e7d1968409c889fb12ef89e5146 in 1699ms, sequenceid=116, compaction requested=false 2024-12-12T19:32:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:52,013 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:52,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T19:32:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:52,016 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:32:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:52,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:52,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212806997cd89eb4c54b0f7f43adc9bec5a_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031970305/Put/seqid=0 2024-12-12T19:32:52,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741951_1127 (size=12154) 2024-12-12T19:32:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:52,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:52,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:52,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:52,562 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212806997cd89eb4c54b0f7f43adc9bec5a_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212806997cd89eb4c54b0f7f43adc9bec5a_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:52,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:52,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e is 175, key is test_row_0/A:col10/1734031970305/Put/seqid=0 2024-12-12T19:32:52,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741952_1128 (size=30955) 2024-12-12T19:32:52,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032032637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:52,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032032741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:52,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:52,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032032947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,000 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e 2024-12-12T19:32:53,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032033008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,012 DEBUG [Thread-518 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8223 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:53,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/5626e4ed02ae410b9f0251ea4bac30f5 is 50, key is test_row_0/B:col10/1734031970305/Put/seqid=0 2024-12-12T19:32:53,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032033047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,051 DEBUG [Thread-520 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8203 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:53,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032033052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032033051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,058 DEBUG [Thread-516 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:53,060 DEBUG [Thread-522 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8217 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:32:53,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741953_1129 (size=12001) 2024-12-12T19:32:53,092 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/5626e4ed02ae410b9f0251ea4bac30f5 2024-12-12T19:32:53,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a43bbcbbe70b4ba0af55f62df3981113 is 50, key is test_row_0/C:col10/1734031970305/Put/seqid=0 2024-12-12T19:32:53,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741954_1130 (size=12001) 2024-12-12T19:32:53,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032033264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,639 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a43bbcbbe70b4ba0af55f62df3981113 2024-12-12T19:32:53,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e 2024-12-12T19:32:53,681 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e, entries=150, sequenceid=130, filesize=30.2 K 2024-12-12T19:32:53,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/5626e4ed02ae410b9f0251ea4bac30f5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5626e4ed02ae410b9f0251ea4bac30f5 2024-12-12T19:32:53,702 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5626e4ed02ae410b9f0251ea4bac30f5, entries=150, sequenceid=130, filesize=11.7 K 2024-12-12T19:32:53,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 
{event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a43bbcbbe70b4ba0af55f62df3981113 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a43bbcbbe70b4ba0af55f62df3981113 2024-12-12T19:32:53,715 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a43bbcbbe70b4ba0af55f62df3981113, entries=150, sequenceid=130, filesize=11.7 K 2024-12-12T19:32:53,724 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2c602e7d1968409c889fb12ef89e5146 in 1708ms, sequenceid=130, compaction requested=true 2024-12-12T19:32:53,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:53,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:53,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-12T19:32:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-12T19:32:53,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-12T19:32:53,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4690 sec 2024-12-12T19:32:53,737 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 3.4800 sec 2024-12-12T19:32:53,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:32:53,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:53,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:53,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:53,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:53,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:53,772 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:53,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:53,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a5b1e6aeec4f4f50a04856c11c508c08_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:53,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032033826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741955_1131 (size=14794) 2024-12-12T19:32:53,847 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:53,860 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a5b1e6aeec4f4f50a04856c11c508c08_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a5b1e6aeec4f4f50a04856c11c508c08_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:53,865 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e21481621ce642c884b36ea73248af5c, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:53,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e21481621ce642c884b36ea73248af5c is 175, key is test_row_0/A:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:53,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741956_1132 (size=39749) 2024-12-12T19:32:53,918 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e21481621ce642c884b36ea73248af5c 2024-12-12T19:32:53,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:53,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032033934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:53,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/f64f1b5dc9e14c6a85bf99bfade53bf7 is 50, key is test_row_0/B:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:54,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741957_1133 (size=12151) 2024-12-12T19:32:54,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:54,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032034143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:54,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T19:32:54,368 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-12T19:32:54,376 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:32:54,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-12T19:32:54,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T19:32:54,391 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:32:54,392 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:32:54,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:32:54,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/f64f1b5dc9e14c6a85bf99bfade53bf7 2024-12-12T19:32:54,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a425942f4a0d4572948fc532bf350f9b is 50, key is test_row_0/C:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:54,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:54,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032034460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741958_1134 (size=12151) 2024-12-12T19:32:54,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T19:32:54,547 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:54,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T19:32:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:54,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T19:32:54,700 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:54,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T19:32:54,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:54,705 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:54,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T19:32:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:54,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:54,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a425942f4a0d4572948fc532bf350f9b 2024-12-12T19:32:54,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/e21481621ce642c884b36ea73248af5c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c 2024-12-12T19:32:54,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c, entries=200, sequenceid=156, filesize=38.8 K 2024-12-12T19:32:54,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/f64f1b5dc9e14c6a85bf99bfade53bf7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/f64f1b5dc9e14c6a85bf99bfade53bf7 2024-12-12T19:32:54,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/f64f1b5dc9e14c6a85bf99bfade53bf7, entries=150, 
sequenceid=156, filesize=11.9 K 2024-12-12T19:32:54,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:54,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032034979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:54,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a425942f4a0d4572948fc532bf350f9b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a425942f4a0d4572948fc532bf350f9b 2024-12-12T19:32:54,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T19:32:55,023 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:55,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T19:32:55,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:55,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:55,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:55,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:55,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:55,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:55,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a425942f4a0d4572948fc532bf350f9b, entries=150, sequenceid=156, filesize=11.9 K 2024-12-12T19:32:55,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 2c602e7d1968409c889fb12ef89e5146 in 1260ms, sequenceid=156, compaction requested=true 2024-12-12T19:32:55,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:55,032 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:55,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:55,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:55,033 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:55,034 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141380 starting at candidate #0 after considering 3 permutations with 3 in ratio 
2024-12-12T19:32:55,034 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/A is initiating minor compaction (all files) 2024-12-12T19:32:55,034 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/A in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:55,035 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ca2f78e1dc084ab5b29953f72fb31ed1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=138.1 K 2024-12-12T19:32:55,035 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:55,035 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ca2f78e1dc084ab5b29953f72fb31ed1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c] 2024-12-12T19:32:55,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:55,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:55,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:55,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:55,042 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:55,042 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/B is initiating minor compaction (all files) 2024-12-12T19:32:55,042 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/B in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:55,043 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/856472a57c2f4caaadcfed9d73835a56, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/20f786bcfa964bb9bc468d943eeafe93, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5626e4ed02ae410b9f0251ea4bac30f5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/f64f1b5dc9e14c6a85bf99bfade53bf7] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=47.2 K 2024-12-12T19:32:55,043 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca2f78e1dc084ab5b29953f72fb31ed1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031968883 2024-12-12T19:32:55,048 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 856472a57c2f4caaadcfed9d73835a56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031968883 2024-12-12T19:32:55,049 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43ed2f36cf934b7eab03e9720fd053af, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734031969748 2024-12-12T19:32:55,055 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 20f786bcfa964bb9bc468d943eeafe93, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734031969883 2024-12-12T19:32:55,060 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5626e4ed02ae410b9f0251ea4bac30f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734031970241 2024-12-12T19:32:55,055 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea6ea2b9f2654ae8ab5b5f1c270f6e4e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734031970241 2024-12-12T19:32:55,060 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f64f1b5dc9e14c6a85bf99bfade53bf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734031972540 2024-12-12T19:32:55,061 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e21481621ce642c884b36ea73248af5c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734031972540 2024-12-12T19:32:55,117 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:55,120 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#B#compaction#108 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:55,121 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/024cfc2629ea47818c0fb23e629c7e9e is 50, key is test_row_0/B:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:55,124 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212699d1af75b884866ada0c7b3b634eb3a_2c602e7d1968409c889fb12ef89e5146 store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:55,127 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212699d1af75b884866ada0c7b3b634eb3a_2c602e7d1968409c889fb12ef89e5146, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:55,127 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212699d1af75b884866ada0c7b3b634eb3a_2c602e7d1968409c889fb12ef89e5146 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:55,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741959_1135 (size=12459) 2024-12-12T19:32:55,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741960_1136 (size=4469) 2024-12-12T19:32:55,168 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#A#compaction#109 average throughput is 0.48 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:55,170 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/93cbe951f33a4533913cf7e876b9bc8c is 175, key is test_row_0/A:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:55,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741961_1137 (size=31413) 2024-12-12T19:32:55,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:55,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T19:32:55,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:55,190 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T19:32:55,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:55,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:55,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:55,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:55,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:55,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:55,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212524e67b6b29b4761b8703eada2ad918a_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031973777/Put/seqid=0 2024-12-12T19:32:55,239 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/93cbe951f33a4533913cf7e876b9bc8c as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/93cbe951f33a4533913cf7e876b9bc8c 2024-12-12T19:32:55,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741962_1138 (size=12304) 2024-12-12T19:32:55,265 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/A of 2c602e7d1968409c889fb12ef89e5146 into 93cbe951f33a4533913cf7e876b9bc8c(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:55,265 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:55,265 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/A, priority=12, startTime=1734031975032; duration=0sec 2024-12-12T19:32:55,266 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:55,266 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:A 2024-12-12T19:32:55,266 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:32:55,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,285 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:32:55,285 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/C is initiating minor compaction (all files) 2024-12-12T19:32:55,286 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/C in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
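The compaction activity above is the region server's own background selection (SortedCompactionPolicy and ExploringCompactionPolicy picking all four files of store A and then store C). For reference, a compaction of this table could also be requested explicitly through the client Admin API; the following is a minimal, hypothetical sketch using the table name from this run, not something executed by the test itself:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Asks the region servers to compact every region of the table; the request is
      // queued and executed asynchronously, much like the CompactSplit entries above.
      admin.compact(table);
      // admin.majorCompact(table) would instead rewrite all store files in each store.
    }
  }
}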
2024-12-12T19:32:55,286 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b642c18affd9420bac19fe1eb39ad308, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/e1ecc442f2984271a10285a1344b3d1f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a43bbcbbe70b4ba0af55f62df3981113, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a425942f4a0d4572948fc532bf350f9b] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=47.2 K 2024-12-12T19:32:55,297 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b642c18affd9420bac19fe1eb39ad308, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734031968883 2024-12-12T19:32:55,299 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1ecc442f2984271a10285a1344b3d1f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734031969883 2024-12-12T19:32:55,303 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a43bbcbbe70b4ba0af55f62df3981113, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734031970241 2024-12-12T19:32:55,303 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212524e67b6b29b4761b8703eada2ad918a_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212524e67b6b29b4761b8703eada2ad918a_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:55,304 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a425942f4a0d4572948fc532bf350f9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734031972540 2024-12-12T19:32:55,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/0287589c6a734ba3bca07f9cbaca15e5, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:55,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/0287589c6a734ba3bca07f9cbaca15e5 is 175, key is test_row_0/A:col10/1734031973777/Put/seqid=0 2024-12-12T19:32:55,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741963_1139 (size=31105) 2024-12-12T19:32:55,332 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=166, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/0287589c6a734ba3bca07f9cbaca15e5 2024-12-12T19:32:55,354 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#C#compaction#111 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:55,354 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a491b5fcfae44d1994b28b41ff7d5cbd is 50, key is test_row_0/C:col10/1734031973770/Put/seqid=0 2024-12-12T19:32:55,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741964_1140 (size=12459) 2024-12-12T19:32:55,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/945e42ced943495390e4286ef8d8f545 is 50, key is test_row_0/B:col10/1734031973777/Put/seqid=0 2024-12-12T19:32:55,383 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a491b5fcfae44d1994b28b41ff7d5cbd as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a491b5fcfae44d1994b28b41ff7d5cbd 2024-12-12T19:32:55,389 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/C of 2c602e7d1968409c889fb12ef89e5146 into a491b5fcfae44d1994b28b41ff7d5cbd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
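The mob.DefaultMobStoreCompactor, mob.DefaultMobStoreFlusher and regionserver.HMobStore entries above show that family A of TestAcidGuarantees is MOB-enabled, so oversized values are written under mobdir rather than into the ordinary store files. A minimal sketch of how such a table could be declared; the threshold value and the plain B and C families are illustrative assumptions, not settings read from this run:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family A keeps cells larger than the threshold in the MOB area (mobdir).
      ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // bytes; illustrative value only
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(cfA)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build();
      admin.createTable(table);
    }
  }
}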
2024-12-12T19:32:55,389 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:55,389 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/C, priority=12, startTime=1734031975039; duration=0sec 2024-12-12T19:32:55,389 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:55,389 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:C 2024-12-12T19:32:55,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741965_1141 (size=12151) 2024-12-12T19:32:55,392 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/945e42ced943495390e4286ef8d8f545 2024-12-12T19:32:55,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/70a379f27c314012b5162fbd8a52ddc6 is 50, key is test_row_0/C:col10/1734031973777/Put/seqid=0 2024-12-12T19:32:55,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741966_1142 (size=12151) 2024-12-12T19:32:55,416 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/70a379f27c314012b5162fbd8a52ddc6 2024-12-12T19:32:55,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/0287589c6a734ba3bca07f9cbaca15e5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5 2024-12-12T19:32:55,430 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5, entries=150, sequenceid=166, filesize=30.4 K 2024-12-12T19:32:55,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 
{event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/945e42ced943495390e4286ef8d8f545 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/945e42ced943495390e4286ef8d8f545
2024-12-12T19:32:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,438 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/945e42ced943495390e4286ef8d8f545, entries=150, sequenceid=166, filesize=11.9 K
2024-12-12T19:32:55,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/70a379f27c314012b5162fbd8a52ddc6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/70a379f27c314012b5162fbd8a52ddc6
2024-12-12T19:32:55,463 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/70a379f27c314012b5162fbd8a52ddc6, entries=150, sequenceid=166, filesize=11.9 K
2024-12-12T19:32:55,464 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 2c602e7d1968409c889fb12ef89e5146 in 274ms, sequenceid=166, compaction requested=false
2024-12-12T19:32:55,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146:
2024-12-12T19:32:55,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.
2024-12-12T19:32:55,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46
2024-12-12T19:32:55,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=46
2024-12-12T19:32:55,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45
2024-12-12T19:32:55,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0730 sec
2024-12-12T19:32:55,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.0910 sec
2024-12-12T19:32:55,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-12T19:32:55,497 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed
2024-12-12T19:32:55,500 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T19:32:55,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees
2024-12-12T19:32:55,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-12-12T19:32:55,502 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T19:32:55,503 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T19:32:55,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
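The HMaster$22 entry above ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") and the surrounding FlushTableProcedure/FlushRegionProcedure records correspond to a client-side Admin flush request; pid=45 is the request that just completed and pid=47 the next one. A minimal sketch of such a call, assuming a standalone client with the default configuration (the test drives it through HBaseAdmin internally):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master stores a FlushTableProcedure and fans out one FlushRegionProcedure
      // per region; the memstores of families A, B and C are written out as new HFiles.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}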
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,575 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/024cfc2629ea47818c0fb23e629c7e9e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/024cfc2629ea47818c0fb23e629c7e9e 2024-12-12T19:32:55,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 
2024-12-12T19:32:55,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,604 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/B of 2c602e7d1968409c889fb12ef89e5146 into 024cfc2629ea47818c0fb23e629c7e9e(size=12.2 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T19:32:55,604 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146:
2024-12-12T19:32:55,604 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/B, priority=12, startTime=1734031975032; duration=0sec
2024-12-12T19:32:55,604 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T19:32:55,604 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:B
2024-12-12T19:32:55,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:32:55,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
2024-12-12T19:32:55,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48
2024-12-12T19:32:55,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.
2024-12-12T19:32:55,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146:
2024-12-12T19:32:55,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.
2024-12-12T19:32:55,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48
2024-12-12T19:32:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=48
2024-12-12T19:32:55,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47
2024-12-12T19:32:55,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 159 msec
2024-12-12T19:32:55,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 164 msec
2024-12-12T19:32:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
[... interleaved storefiletracker.StoreFileTrackerFactory(122) DEBUG entries (19:32:55,801-19:32:55,817) ...]
2024-12-12T19:32:55,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-12-12T19:32:55,804 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed
2024-12-12T19:32:55,808 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T19:32:55,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees
2024-12-12T19:32:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-12T19:32:55,815 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T19:32:55,816 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T19:32:55,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
[... identical DEBUG entries from RpcServer.default.FPBQ.Fifo.handler=0/1/2 (queue=0, port=42689), storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, 2024-12-12T19:32:55,817 through 19:32:55,884 ...]
2024-12-12T19:32:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T19:32:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:32:55,971 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:55,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T19:32:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:55,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:55,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-12T19:32:55,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-12T19:32:55,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-12T19:32:55,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 168 msec 2024-12-12T19:32:55,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,999 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:55,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 192 msec 2024-12-12T19:32:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,003 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,008 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,020 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,025 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,035 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,045 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,054 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,062 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,068 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,077 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,083 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,092 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,096 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,101 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,105 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,109 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T19:32:56,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,120 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: 
default:TestAcidGuarantees, procId: 49 completed
2024-12-12T19:32:56,140 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T19:32:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees
2024-12-12T19:32:56,142 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T19:32:56,146 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T19:32:56,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-12T19:32:56,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
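The records above show the master turning a client flush request into a FlushTableProcedure (pid=51) with a FlushRegionProcedure child (pid=52). For orientation only, here is a minimal Java sketch of how such a flush is requested through the public HBase client API; the configuration setup is an assumption (the test's mini-cluster wires its own connection), and only Admin.flush and the table name correspond to what the log records.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Assumed client configuration; a real deployment would set
        // hbase.zookeeper.quorum (the mini-cluster in this test does this internally).
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Corresponds to the "Client=jenkins//... flush TestAcidGuarantees" request,
            // which the master executes as the FlushTableProcedure seen above (pid=51).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}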
2024-12-12T19:32:56,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-12T19:32:56,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-12T19:32:56,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A
2024-12-12T19:32:56,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:32:56,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B
2024-12-12T19:32:56,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:32:56,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C
2024-12-12T19:32:56,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:32:56,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146
2024-12-12T19:32:56,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121203c7655b0e0d4a288f04006a39876dd2_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031976222/Put/seqid=0
2024-12-12T19:32:56,298 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
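The flushed cell above (key test_row_0/A:col10) suggests rows written across the column families A, B and C, which is the shape of data this test produces. As a hedged sketch under that assumption, a client could write such a row with the standard HBase Put API as below; the value payload and the connection setup are illustrative, not taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteTestRowExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // assumed client configuration
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One row spanning the three column families A, B and C, matching the
            // "test_row_0/A:col10" style of cell keys seen in the flush output above.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            byte[] value = Bytes.toBytes("value");          // hypothetical payload
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            table.put(put);
        }
    }
}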
2024-12-12T19:32:56,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52
2024-12-12T19:32:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.
2024-12-12T19:32:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing
2024-12-12T19:32:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.
2024-12-12T19:32:56,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52
java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:32:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52
java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:32:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
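The IOException above is a retriable refusal rather than a data error: the region is already being flushed by MemStoreFlusher.0, so the FlushRegionCallable for pid=52 fails fast and the master reschedules the remote procedure. The sketch below is only an illustration of that guard pattern, not the actual HBase source; every name in it apart from the exception message is hypothetical.

import java.io.IOException;

// Simplified illustration of the "already flushing" guard seen above; class,
// method and interface names are invented for the sketch, not HBase's real code.
class FlushRegionTask {
    private final Region region;   // hypothetical handle to the region being flushed

    FlushRegionTask(Region region) {
        this.region = region;
    }

    void doCall() throws IOException {
        // If another flush is in progress, do not wait for it: fail the remote
        // procedure so the master can retry it later, as happened with pid=52 above.
        if (region.isFlushInProgress()) {
            throw new IOException("Unable to complete flush " + region.getRegionInfo());
        }
        region.flush();
    }

    interface Region {
        boolean isFlushInProgress();   // assumed query for an in-flight flush
        String getRegionInfo();        // assumed textual region descriptor
        void flush() throws IOException;
    }
}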
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741967_1143 (size=22268) 2024-12-12T19:32:56,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,323 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:56,344 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121203c7655b0e0d4a288f04006a39876dd2_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121203c7655b0e0d4a288f04006a39876dd2_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:56,345 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ada4799cdddb41d987ce09814a1d485b, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:56,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ada4799cdddb41d987ce09814a1d485b is 175, key is test_row_0/A:col10/1734031976222/Put/seqid=0 2024-12-12T19:32:56,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741968_1144 (size=65673) 2024-12-12T19:32:56,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032036414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T19:32:56,456 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:56,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032036519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,611 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:56,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:56,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:56,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:56,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032036735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T19:32:56,765 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:56,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,787 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=182, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ada4799cdddb41d987ce09814a1d485b 2024-12-12T19:32:56,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/e34a6171f04841f5b80d99028842d23a is 50, key is test_row_0/B:col10/1734031976222/Put/seqid=0 2024-12-12T19:32:56,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741969_1145 (size=12151) 2024-12-12T19:32:56,939 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:56,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:56,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:56,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:56,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:56,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:56,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:57,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032037038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:57,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:57,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:57,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:57,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T19:32:57,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/e34a6171f04841f5b80d99028842d23a 2024-12-12T19:32:57,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a70ff57801ce473e8d291b23bece196c is 50, key is test_row_0/C:col10/1734031976222/Put/seqid=0 2024-12-12T19:32:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741970_1146 (size=12151) 2024-12-12T19:32:57,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032037548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
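Editor's note: the RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server rejecting mutations while the region's memstore is above its blocking limit and a flush is still in flight. As a minimal client-side sketch only — the connection setup, row/value contents, and retry/backoff numbers below are assumptions, not taken from this test, and in practice the HBase client already retries internally before surfacing the failure — a writer hitting this condition can back off and retry the put:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row and column family mirror the keys seen in the log (test_row_0, family A).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                 // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);               // may fail while the region is over its memstore limit
                    break;
                } catch (IOException e) {
                    // Typically RegionTooBusyException (or a client-side retry wrapper around it).
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;               // simple exponential backoff
                }
            }
        }
    }
}
```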
2024-12-12T19:32:57,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a70ff57801ce473e8d291b23bece196c 2024-12-12T19:32:57,735 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:57,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:32:57,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:32:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:32:57,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/ada4799cdddb41d987ce09814a1d485b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b 2024-12-12T19:32:57,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b, entries=350, sequenceid=182, filesize=64.1 K 2024-12-12T19:32:57,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/e34a6171f04841f5b80d99028842d23a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/e34a6171f04841f5b80d99028842d23a 2024-12-12T19:32:57,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/e34a6171f04841f5b80d99028842d23a, entries=150, sequenceid=182, filesize=11.9 K 2024-12-12T19:32:57,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a70ff57801ce473e8d291b23bece196c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a70ff57801ce473e8d291b23bece196c 2024-12-12T19:32:57,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a70ff57801ce473e8d291b23bece196c, entries=150, sequenceid=182, filesize=11.9 K 2024-12-12T19:32:57,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2c602e7d1968409c889fb12ef89e5146 in 1636ms, sequenceid=182, compaction requested=true 2024-12-12T19:32:57,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:57,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:57,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:57,892 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:57,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact 
mark for store 2c602e7d1968409c889fb12ef89e5146:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:57,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:57,892 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:57,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:57,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:57,898 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128191 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:57,898 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/A is initiating minor compaction (all files) 2024-12-12T19:32:57,898 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/A in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,898 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/93cbe951f33a4533913cf7e876b9bc8c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=125.2 K 2024-12-12T19:32:57,898 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,898 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/93cbe951f33a4533913cf7e876b9bc8c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b] 2024-12-12T19:32:57,902 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:57,904 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:57,904 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93cbe951f33a4533913cf7e876b9bc8c, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734031972540 2024-12-12T19:32:57,904 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/B is initiating minor compaction (all files) 2024-12-12T19:32:57,904 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/B in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:57,904 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/024cfc2629ea47818c0fb23e629c7e9e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/945e42ced943495390e4286ef8d8f545, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/e34a6171f04841f5b80d99028842d23a] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=35.9 K 2024-12-12T19:32:57,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
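Editor's note: the ExploringCompactionPolicy lines above show each store selecting all three eligible HFiles ("3 store files, 0 compacting, 3 eligible, 16 blocking") for a minor compaction once the flush lands a third file. The thresholds behind that decision are ordinary HBase configuration keys; the sketch below sets them programmatically with illustrative values (these are not the values used by this test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum / maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes are blocked (the "16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```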
2024-12-12T19:32:57,905 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:57,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:57,908 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0287589c6a734ba3bca07f9cbaca15e5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734031973777 2024-12-12T19:32:57,908 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 024cfc2629ea47818c0fb23e629c7e9e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734031972540 2024-12-12T19:32:57,909 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ada4799cdddb41d987ce09814a1d485b, keycount=350, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734031976152 2024-12-12T19:32:57,909 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 945e42ced943495390e4286ef8d8f545, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734031973777 2024-12-12T19:32:57,909 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e34a6171f04841f5b80d99028842d23a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734031976222 2024-12-12T19:32:57,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f53ed769ca0244cf9c9cff03858ec59e_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031976383/Put/seqid=0 2024-12-12T19:32:57,988 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees 
family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:58,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741971_1147 (size=12304) 2024-12-12T19:32:58,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:58,017 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#B#compaction#119 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:58,018 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/7da79363171640b6b900d21b931eb9f3 is 50, key is test_row_0/B:col10/1734031976222/Put/seqid=0 2024-12-12T19:32:58,020 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f53ed769ca0244cf9c9cff03858ec59e_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f53ed769ca0244cf9c9cff03858ec59e_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:58,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/6b428d742dea478d9d963cf1c40533df, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:58,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/6b428d742dea478d9d963cf1c40533df is 175, key is test_row_0/A:col10/1734031976383/Put/seqid=0 2024-12-12T19:32:58,033 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121232f6ac3fa4c449ffb72e5c85d2fcdfcb_2c602e7d1968409c889fb12ef89e5146 store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:58,035 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121232f6ac3fa4c449ffb72e5c85d2fcdfcb_2c602e7d1968409c889fb12ef89e5146, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:58,036 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121232f6ac3fa4c449ffb72e5c85d2fcdfcb_2c602e7d1968409c889fb12ef89e5146 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:58,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741972_1148 (size=12561) 2024-12-12T19:32:58,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741973_1149 (size=31105) 2024-12-12T19:32:58,114 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/6b428d742dea478d9d963cf1c40533df 2024-12-12T19:32:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741974_1150 (size=4469) 2024-12-12T19:32:58,132 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#A#compaction#118 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:58,132 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/3d3fb9eff0344b608c370413542c955a is 175, key is test_row_0/A:col10/1734031976222/Put/seqid=0 2024-12-12T19:32:58,134 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/7da79363171640b6b900d21b931eb9f3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/7da79363171640b6b900d21b931eb9f3 2024-12-12T19:32:58,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/91d5bbd693bb44d2a92d4a9122bfe9da is 50, key is test_row_0/B:col10/1734031976383/Put/seqid=0 2024-12-12T19:32:58,159 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/B of 2c602e7d1968409c889fb12ef89e5146 into 7da79363171640b6b900d21b931eb9f3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:58,159 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:58,159 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/B, priority=13, startTime=1734031977892; duration=0sec 2024-12-12T19:32:58,159 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:58,159 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:B 2024-12-12T19:32:58,159 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:58,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741975_1151 (size=12151) 2024-12-12T19:32:58,163 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:58,163 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/C is initiating minor compaction (all files) 2024-12-12T19:32:58,163 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/C in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
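Editor's note: the compactions of the B and C stores here are system-requested by the flusher; an equivalent compaction can also be requested explicitly through the public Admin API. A minimal sketch, assuming a connection to this cluster is available (both calls only submit the request; the server schedules the work asynchronously):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Request a (minor) compaction of one column family.
            admin.compact(table, Bytes.toBytes("C"));
            // Or force a major compaction of the whole table.
            admin.majorCompact(table);
        }
    }
}
```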
2024-12-12T19:32:58,163 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a491b5fcfae44d1994b28b41ff7d5cbd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/70a379f27c314012b5162fbd8a52ddc6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a70ff57801ce473e8d291b23bece196c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=35.9 K 2024-12-12T19:32:58,164 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a491b5fcfae44d1994b28b41ff7d5cbd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734031972540 2024-12-12T19:32:58,164 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/91d5bbd693bb44d2a92d4a9122bfe9da 2024-12-12T19:32:58,164 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 70a379f27c314012b5162fbd8a52ddc6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734031973777 2024-12-12T19:32:58,165 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a70ff57801ce473e8d291b23bece196c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734031976222 2024-12-12T19:32:58,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741976_1152 (size=31515) 2024-12-12T19:32:58,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/247f6e0728ed477cac6244980a8c6933 is 50, key is test_row_0/C:col10/1734031976383/Put/seqid=0 2024-12-12T19:32:58,197 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#C#compaction#122 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:58,197 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/9316e9f2dbfc43e6a7671a5a902e43ec is 50, key is test_row_0/C:col10/1734031976222/Put/seqid=0 2024-12-12T19:32:58,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741977_1153 (size=12151) 2024-12-12T19:32:58,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741978_1154 (size=12561) 2024-12-12T19:32:58,244 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/9316e9f2dbfc43e6a7671a5a902e43ec as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/9316e9f2dbfc43e6a7671a5a902e43ec 2024-12-12T19:32:58,262 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/C of 2c602e7d1968409c889fb12ef89e5146 into 9316e9f2dbfc43e6a7671a5a902e43ec(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:32:58,262 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:58,262 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/C, priority=13, startTime=1734031977892; duration=0sec 2024-12-12T19:32:58,262 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:58,262 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:C 2024-12-12T19:32:58,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T19:32:58,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:32:58,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:58,637 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/247f6e0728ed477cac6244980a8c6933 2024-12-12T19:32:58,645 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/3d3fb9eff0344b608c370413542c955a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3d3fb9eff0344b608c370413542c955a 2024-12-12T19:32:58,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032038653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:58,659 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/A of 2c602e7d1968409c889fb12ef89e5146 into 3d3fb9eff0344b608c370413542c955a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
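Editor's note: the repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks writes once the region's memstore exceeds the flush size times the block multiplier; the 512 K limit indicates this test runs with a deliberately small flush size. The two server-side settings involved are sketched below with defaults-style illustrative values (not the values this test uses):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Block new writes once the memstore exceeds flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
    }
}
```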
2024-12-12T19:32:58,659 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:58,659 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/A, priority=13, startTime=1734031977892; duration=0sec 2024-12-12T19:32:58,659 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:58,659 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:A 2024-12-12T19:32:58,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/6b428d742dea478d9d963cf1c40533df as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df 2024-12-12T19:32:58,670 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df, entries=150, sequenceid=207, filesize=30.4 K 2024-12-12T19:32:58,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/91d5bbd693bb44d2a92d4a9122bfe9da as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/91d5bbd693bb44d2a92d4a9122bfe9da 2024-12-12T19:32:58,679 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/91d5bbd693bb44d2a92d4a9122bfe9da, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T19:32:58,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/247f6e0728ed477cac6244980a8c6933 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/247f6e0728ed477cac6244980a8c6933 2024-12-12T19:32:58,698 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/247f6e0728ed477cac6244980a8c6933, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T19:32:58,699 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2c602e7d1968409c889fb12ef89e5146 in 794ms, sequenceid=207, compaction requested=false 2024-12-12T19:32:58,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:58,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:58,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-12T19:32:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-12T19:32:58,703 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-12T19:32:58,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5560 sec 2024-12-12T19:32:58,705 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.5640 sec 2024-12-12T19:32:58,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:58,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:32:58,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:58,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:58,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:58,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:58,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:58,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:58,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128bee1886c16944c4b37ec5f3ffa694be_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031978651/Put/seqid=0 
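Editor's note: pid=51, which finishes above, is the FlushTableProcedure for TestAcidGuarantees; the "Checking to see if procedure is done pid=51" polling earlier appears to be the client waiting on that procedure. The same table flush can be requested from client code via Admin.flush, sketched below under the assumption that a connection to this cluster is available:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Flush all column families of the table; returns once the flush has completed.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```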
2024-12-12T19:32:58,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741979_1155 (size=12304) 2024-12-12T19:32:58,835 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:58,856 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128bee1886c16944c4b37ec5f3ffa694be_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128bee1886c16944c4b37ec5f3ffa694be_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:58,868 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/55cc2e1fd0cd489a8dca821f8e2cbf50, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:58,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/55cc2e1fd0cd489a8dca821f8e2cbf50 is 175, key is test_row_0/A:col10/1734031978651/Put/seqid=0 2024-12-12T19:32:58,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:58,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032038899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:58,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741980_1156 (size=31105) 2024-12-12T19:32:58,915 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=222, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/55cc2e1fd0cd489a8dca821f8e2cbf50 2024-12-12T19:32:58,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/dee1aa39c673412b81017648c3ac3584 is 50, key is test_row_0/B:col10/1734031978651/Put/seqid=0 2024-12-12T19:32:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741981_1157 (size=12151) 2024-12-12T19:32:58,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/dee1aa39c673412b81017648c3ac3584 2024-12-12T19:32:59,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032039003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:59,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/063cb69611204ef9a37a0929bc33bb91 is 50, key is test_row_0/C:col10/1734031978651/Put/seqid=0 2024-12-12T19:32:59,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741982_1158 (size=12151) 2024-12-12T19:32:59,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/063cb69611204ef9a37a0929bc33bb91 2024-12-12T19:32:59,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/55cc2e1fd0cd489a8dca821f8e2cbf50 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50 2024-12-12T19:32:59,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50, entries=150, sequenceid=222, filesize=30.4 K 2024-12-12T19:32:59,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/dee1aa39c673412b81017648c3ac3584 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dee1aa39c673412b81017648c3ac3584 2024-12-12T19:32:59,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dee1aa39c673412b81017648c3ac3584, entries=150, sequenceid=222, filesize=11.9 K 2024-12-12T19:32:59,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/063cb69611204ef9a37a0929bc33bb91 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/063cb69611204ef9a37a0929bc33bb91 2024-12-12T19:32:59,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/063cb69611204ef9a37a0929bc33bb91, entries=150, sequenceid=222, filesize=11.9 K 2024-12-12T19:32:59,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2c602e7d1968409c889fb12ef89e5146 in 344ms, sequenceid=222, compaction requested=true 2024-12-12T19:32:59,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:59,110 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:59,112 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:59,112 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/A is initiating minor compaction (all files) 2024-12-12T19:32:59,112 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/A in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:59,112 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3d3fb9eff0344b608c370413542c955a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=91.5 K 2024-12-12T19:32:59,112 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:59,112 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3d3fb9eff0344b608c370413542c955a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50] 2024-12-12T19:32:59,114 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d3fb9eff0344b608c370413542c955a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734031976222 2024-12-12T19:32:59,114 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b428d742dea478d9d963cf1c40533df, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734031976383 2024-12-12T19:32:59,114 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55cc2e1fd0cd489a8dca821f8e2cbf50, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1734031978646 2024-12-12T19:32:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:32:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:59,118 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:59,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:32:59,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:59,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:32:59,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:59,122 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:59,122 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/B is initiating minor compaction (all files) 2024-12-12T19:32:59,122 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/B in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:32:59,122 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/7da79363171640b6b900d21b931eb9f3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/91d5bbd693bb44d2a92d4a9122bfe9da, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dee1aa39c673412b81017648c3ac3584] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=36.0 K 2024-12-12T19:32:59,123 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7da79363171640b6b900d21b931eb9f3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734031976222 2024-12-12T19:32:59,124 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 91d5bbd693bb44d2a92d4a9122bfe9da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734031976383 2024-12-12T19:32:59,125 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting dee1aa39c673412b81017648c3ac3584, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1734031978646 2024-12-12T19:32:59,134 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:59,148 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412121f6689e811ef410f8e46ccf603ec90b0_2c602e7d1968409c889fb12ef89e5146 store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:59,150 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412121f6689e811ef410f8e46ccf603ec90b0_2c602e7d1968409c889fb12ef89e5146, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:59,150 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121f6689e811ef410f8e46ccf603ec90b0_2c602e7d1968409c889fb12ef89e5146 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:59,163 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#B#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:59,164 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/9713e6fe326f45f89b78e7395170cf6f is 50, key is test_row_0/B:col10/1734031978651/Put/seqid=0 2024-12-12T19:32:59,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741983_1159 (size=4469) 2024-12-12T19:32:59,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741984_1160 (size=12663) 2024-12-12T19:32:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:59,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:32:59,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:32:59,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:59,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:32:59,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:59,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:32:59,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:32:59,216 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/9713e6fe326f45f89b78e7395170cf6f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/9713e6fe326f45f89b78e7395170cf6f 2024-12-12T19:32:59,237 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/B of 2c602e7d1968409c889fb12ef89e5146 into 9713e6fe326f45f89b78e7395170cf6f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:59,237 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:59,237 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/B, priority=13, startTime=1734031979118; duration=0sec 2024-12-12T19:32:59,238 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:32:59,238 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:B 2024-12-12T19:32:59,238 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:32:59,242 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:32:59,242 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/C is initiating minor compaction (all files) 2024-12-12T19:32:59,242 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/C in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:32:59,242 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/9316e9f2dbfc43e6a7671a5a902e43ec, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/247f6e0728ed477cac6244980a8c6933, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/063cb69611204ef9a37a0929bc33bb91] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=36.0 K 2024-12-12T19:32:59,243 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9316e9f2dbfc43e6a7671a5a902e43ec, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734031976222 2024-12-12T19:32:59,244 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 247f6e0728ed477cac6244980a8c6933, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734031976383 2024-12-12T19:32:59,244 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 063cb69611204ef9a37a0929bc33bb91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1734031978646 2024-12-12T19:32:59,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123313e00cb65045bb81e54a2032ca5271_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031978894/Put/seqid=0 2024-12-12T19:32:59,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032039251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:59,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741985_1161 (size=12304) 2024-12-12T19:32:59,296 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#C#compaction#129 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:59,297 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/8a6fe4a29569480da14465b9640d4828 is 50, key is test_row_0/C:col10/1734031978651/Put/seqid=0 2024-12-12T19:32:59,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741986_1162 (size=12663) 2024-12-12T19:32:59,355 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/8a6fe4a29569480da14465b9640d4828 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/8a6fe4a29569480da14465b9640d4828 2024-12-12T19:32:59,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:59,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032039356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:59,399 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/C of 2c602e7d1968409c889fb12ef89e5146 into 8a6fe4a29569480da14465b9640d4828(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:32:59,399 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:32:59,399 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/C, priority=13, startTime=1734031979119; duration=0sec 2024-12-12T19:32:59,400 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:32:59,400 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:C 2024-12-12T19:32:59,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032039568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:32:59,600 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#A#compaction#126 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:32:59,601 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/c460bb0c1fca4676998bdf81595fc357 is 175, key is test_row_0/A:col10/1734031978651/Put/seqid=0 2024-12-12T19:32:59,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741987_1163 (size=31617) 2024-12-12T19:32:59,685 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:32:59,697 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123313e00cb65045bb81e54a2032ca5271_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123313e00cb65045bb81e54a2032ca5271_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:32:59,706 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/78c82177381b4af2bbb858a2cdf3fe9a, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:32:59,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/78c82177381b4af2bbb858a2cdf3fe9a is 175, key is test_row_0/A:col10/1734031978894/Put/seqid=0 2024-12-12T19:32:59,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741988_1164 (size=31105) 2024-12-12T19:32:59,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:32:59,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032039876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:00,079 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/c460bb0c1fca4676998bdf81595fc357 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/c460bb0c1fca4676998bdf81595fc357 2024-12-12T19:33:00,112 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/A of 2c602e7d1968409c889fb12ef89e5146 into c460bb0c1fca4676998bdf81595fc357(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-12-12T19:33:00,112 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:00,112 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/A, priority=13, startTime=1734031979110; duration=1sec 2024-12-12T19:33:00,112 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:00,112 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:A 2024-12-12T19:33:00,151 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/78c82177381b4af2bbb858a2cdf3fe9a 2024-12-12T19:33:00,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/486360422afb44469aaec93061def285 is 50, key is test_row_0/B:col10/1734031978894/Put/seqid=0 2024-12-12T19:33:00,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741989_1165 (size=12151) 2024-12-12T19:33:00,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T19:33:00,280 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-12T19:33:00,281 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:00,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-12T19:33:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:00,283 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:00,283 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:00,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:00,384 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:00,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032040387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:00,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:00,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:00,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:00,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:00,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:00,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,591 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:00,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:00,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:00,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:00,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:00,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/486360422afb44469aaec93061def285 2024-12-12T19:33:00,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f54acd49c60a4fe9bd6fe7f874f062cc is 50, key is test_row_0/C:col10/1734031978894/Put/seqid=0 2024-12-12T19:33:00,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:00,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741990_1166 (size=12151) 2024-12-12T19:33:00,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:00,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:00,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
as already flushing 2024-12-12T19:33:00,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:00,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,872 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T19:33:00,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:00,947 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:00,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:00,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:00,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:00,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:00,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:01,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:01,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f54acd49c60a4fe9bd6fe7f874f062cc 2024-12-12T19:33:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/78c82177381b4af2bbb858a2cdf3fe9a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a 2024-12-12T19:33:01,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a, entries=150, sequenceid=247, filesize=30.4 K 2024-12-12T19:33:01,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/486360422afb44469aaec93061def285 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/486360422afb44469aaec93061def285 2024-12-12T19:33:01,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/486360422afb44469aaec93061def285, entries=150, 
sequenceid=247, filesize=11.9 K 2024-12-12T19:33:01,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f54acd49c60a4fe9bd6fe7f874f062cc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f54acd49c60a4fe9bd6fe7f874f062cc 2024-12-12T19:33:01,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:01,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:01,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:01,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f54acd49c60a4fe9bd6fe7f874f062cc, entries=150, sequenceid=247, filesize=11.9 K 2024-12-12T19:33:01,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2c602e7d1968409c889fb12ef89e5146 in 2073ms, sequenceid=247, compaction requested=false 2024-12-12T19:33:01,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:01,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:01,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:01,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:33:01,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:33:01,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:01,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:33:01,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:01,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:33:01,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:01,427 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:01,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:01,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
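
The entries above show the master-driven flush procedure (pid=54) being dispatched to the region server and rejected with "NOT flushing ... as already flushing" while the region's own MemStoreFlusher is still writing out stores A/B/C; after each failure report the master re-dispatches the callable, which is why the same stack trace repeats. As a point of reference only, this is a minimal sketch of how a table flush is requested through the public Admin API; the table name and plain HBaseConfiguration here are assumptions for illustration, not taken from the test code, and whether this test drives the flush through Admin or another path is not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush() asks the cluster to flush every region of the table.
      // The per-region work is what appears in the log as FlushRegionCallable
      // with a pid; if a region is already flushing on its own, the callable
      // fails and is retried, which is the repeated
      // "NOT flushing ... as already flushing" pattern seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
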
2024-12-12T19:33:01,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a2e4ef63b9ee4f87939c596ed2bc4fe1_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:01,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741991_1167 (size=12404) 2024-12-12T19:33:01,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:01,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:01,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:01,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:01,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032041615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032041718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:01,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:01,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:01,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
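
Alongside the flush retries, the RegionTooBusyException warnings above show client mutations being rejected in HRegion.checkResources because the memstore is over its blocking limit; the exception is returned to the caller through CallRunner. A minimal client-side sketch of that failure mode follows, assuming the public client API. Note that the stock HBase client normally retries RegionTooBusyException internally before surfacing it, so the explicit catch-and-backoff loop below is purely illustrative (it would only trigger with client retries configured very low); the row key, family, and qualifier come from the keys in the log, the value is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the region is over its
          // memstore blocking limit, as in the "Over memstore limit=512.0 K"
          // warnings above.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Back off and let the in-flight flush drain the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
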
2024-12-12T19:33:01,897 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:01,912 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a2e4ef63b9ee4f87939c596ed2bc4fe1_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a2e4ef63b9ee4f87939c596ed2bc4fe1_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:01,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:01,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:01,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:01,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:01,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:01,923 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/5e2b3efed53c4ebe921987d412dfb135, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:01,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/5e2b3efed53c4ebe921987d412dfb135 is 175, key is test_row_0/A:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:01,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032041922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
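
The "Over memstore limit=512.0 K" figure in these warnings is the per-region blocking threshold, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows one combination that yields a 512 KB limit (128 KB x 4) purely for illustration; the test's actual settings are not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking threshold = flush size * block multiplier.
    // 128 KB * 4 = 512 KB, matching the limit reported in the log;
    // these values are illustrative, not the test's configuration.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}
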
2024-12-12T19:33:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741992_1168 (size=31205) 2024-12-12T19:33:02,083 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:02,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:02,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:02,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032042231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,249 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:02,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:02,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,252 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:02,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,375 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/5e2b3efed53c4ebe921987d412dfb135 2024-12-12T19:33:02,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:02,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:02,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:02,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:02,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/c0e33dfb36d64d399568049fb9c40582 is 50, key is test_row_0/B:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741993_1169 (size=12251) 2024-12-12T19:33:02,567 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:02,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:02,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:02,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,720 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:02,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:02,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032042741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:02,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:02,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:02,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:02,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:02,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:02,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/c0e33dfb36d64d399568049fb9c40582 2024-12-12T19:33:02,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/1af9255524d043be945f502e0030409f is 50, key is test_row_0/C:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:03,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741994_1170 (size=12251) 2024-12-12T19:33:03,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/1af9255524d043be945f502e0030409f 2024-12-12T19:33:03,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:03,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:03,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:03,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:03,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:03,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:03,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/5e2b3efed53c4ebe921987d412dfb135 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135 2024-12-12T19:33:03,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135, entries=150, sequenceid=262, filesize=30.5 K 2024-12-12T19:33:03,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:03,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44212 deadline: 1734032043073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,077 DEBUG [Thread-520 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18229 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:03,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/c0e33dfb36d64d399568049fb9c40582 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c0e33dfb36d64d399568049fb9c40582 2024-12-12T19:33:03,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:03,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44254 deadline: 1734032043091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,095 DEBUG [Thread-516 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18247 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:03,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c0e33dfb36d64d399568049fb9c40582, entries=150, sequenceid=262, filesize=12.0 K 2024-12-12T19:33:03,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/1af9255524d043be945f502e0030409f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/1af9255524d043be945f502e0030409f 2024-12-12T19:33:03,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44244 deadline: 1734032043096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44256 deadline: 1734032043104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,108 DEBUG [Thread-518 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18319 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:03,112 DEBUG [Thread-522 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18269 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:03,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/1af9255524d043be945f502e0030409f, entries=150, sequenceid=262, filesize=12.0 K 2024-12-12T19:33:03,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 2c602e7d1968409c889fb12ef89e5146 in 1717ms, sequenceid=262, compaction requested=true 2024-12-12T19:33:03,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:03,136 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:03,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:03,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:03,137 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:03,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:03,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:03,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:03,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:03,145 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:03,145 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/B is initiating minor compaction (all files) 2024-12-12T19:33:03,145 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/B in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:03,146 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/9713e6fe326f45f89b78e7395170cf6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/486360422afb44469aaec93061def285, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c0e33dfb36d64d399568049fb9c40582] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=36.2 K 2024-12-12T19:33:03,146 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93927 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:03,146 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/A is initiating minor compaction (all files) 2024-12-12T19:33:03,146 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/A in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
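The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are thrown by HRegion.checkResources(), which blocks new mutations once a region's memstore passes its blocking size, computed as the configured flush size times the block multiplier. The following is a minimal sketch of that arithmetic only, not this test's actual settings: the 128 KB flush size is an assumed value chosen because, with the default multiplier of 4, it yields the 512 K limit reported in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style values; production defaults are 128 MB and 4.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;

    // 128 KB * 4 = 512 KB, the limit reported by RegionTooBusyException above.
    System.out.printf("memstore blocking limit = %d bytes (%.1f K)%n",
        blockingLimit, blockingLimit / 1024.0);
  }
}

Once the memstore drains below that limit (the MemStoreFlusher entries that follow), the same mutations succeed on a later retry, which is why the writer threads keep retrying rather than failing outright.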
2024-12-12T19:33:03,147 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/c460bb0c1fca4676998bdf81595fc357, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=91.7 K 2024-12-12T19:33:03,147 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:03,147 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/c460bb0c1fca4676998bdf81595fc357, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135] 2024-12-12T19:33:03,147 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9713e6fe326f45f89b78e7395170cf6f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1734031978646 2024-12-12T19:33:03,148 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c460bb0c1fca4676998bdf81595fc357, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1734031978646 2024-12-12T19:33:03,149 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 486360422afb44469aaec93061def285, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734031978889 2024-12-12T19:33:03,151 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78c82177381b4af2bbb858a2cdf3fe9a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734031978889 2024-12-12T19:33:03,152 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c0e33dfb36d64d399568049fb9c40582, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734031979221 2024-12-12T19:33:03,153 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e2b3efed53c4ebe921987d412dfb135, keycount=150, bloomtype=ROW, size=30.5 
K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734031979221 2024-12-12T19:33:03,173 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#B#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:03,174 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/565004c141964dc99cbce80bf1f26e12 is 50, key is test_row_0/B:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:03,188 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:03,197 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T19:33:03,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:03,205 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:33:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:33:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:33:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:33:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:03,226 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412125a8b85b856a84738b19c10f1d3ec0815_2c602e7d1968409c889fb12ef89e5146 store=[table=TestAcidGuarantees family=A 
region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:03,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741995_1171 (size=12865) 2024-12-12T19:33:03,231 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412125a8b85b856a84738b19c10f1d3ec0815_2c602e7d1968409c889fb12ef89e5146, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:03,231 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125a8b85b856a84738b19c10f1d3ec0815_2c602e7d1968409c889fb12ef89e5146 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:03,267 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/565004c141964dc99cbce80bf1f26e12 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/565004c141964dc99cbce80bf1f26e12 2024-12-12T19:33:03,279 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/B of 2c602e7d1968409c889fb12ef89e5146 into 565004c141964dc99cbce80bf1f26e12(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
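The RpcRetryingCallerImpl lines above (tries=8, retries=16, started roughly 18 s ago) show the client side of the same condition: HTable.put() retries with backoff until the region accepts the write or the retry budget is exhausted. The sketch below shows how those knobs map onto the client configuration; the table name, row, and column family come from the log, while the pause value and the example cell value are assumptions, and the code presumes a reachable cluster.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches retries=16 in the log
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms (assumed value)

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Each attempt that hits RegionTooBusyException is retried with backoff;
      // only after the retry budget is exhausted does put() surface an IOException.
      table.put(put);
    } catch (IOException e) {
      System.err.println("write gave up after retries: " + e.getMessage());
    }
  }
}

Lowering hbase.client.retries.number makes the RegionTooBusyException surface to the caller sooner instead of being absorbed by the retry loop, which is usually what a latency-sensitive writer wants.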
2024-12-12T19:33:03,279 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:03,279 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/B, priority=13, startTime=1734031983137; duration=0sec 2024-12-12T19:33:03,279 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:03,279 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:B 2024-12-12T19:33:03,279 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:03,282 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:03,282 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/C is initiating minor compaction (all files) 2024-12-12T19:33:03,282 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/C in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:03,282 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/8a6fe4a29569480da14465b9640d4828, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f54acd49c60a4fe9bd6fe7f874f062cc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/1af9255524d043be945f502e0030409f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=36.2 K 2024-12-12T19:33:03,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a1dae4a6ece24e758d5945f4ddccaf6f_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031981593/Put/seqid=0 2024-12-12T19:33:03,288 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a6fe4a29569480da14465b9640d4828, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1734031978646 2024-12-12T19:33:03,289 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f54acd49c60a4fe9bd6fe7f874f062cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=247, earliestPutTs=1734031978889 2024-12-12T19:33:03,290 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 1af9255524d043be945f502e0030409f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734031979221 2024-12-12T19:33:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741996_1172 (size=4469) 2024-12-12T19:33:03,319 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#A#compaction#136 average throughput is 0.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:03,320 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e is 175, key is test_row_0/A:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:03,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741997_1173 (size=12454) 2024-12-12T19:33:03,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:03,329 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#C#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:03,330 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/fa2248fbc09b4c4aa35238b7c6173829 is 50, key is test_row_0/C:col10/1734031981408/Put/seqid=0 2024-12-12T19:33:03,345 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a1dae4a6ece24e758d5945f4ddccaf6f_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a1dae4a6ece24e758d5945f4ddccaf6f_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:03,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/24a8127a2f194084b31ce34776272241, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:03,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/24a8127a2f194084b31ce34776272241 is 175, key is test_row_0/A:col10/1734031981593/Put/seqid=0 2024-12-12T19:33:03,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741998_1174 (size=31819) 2024-12-12T19:33:03,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741999_1175 (size=12865) 2024-12-12T19:33:03,500 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/fa2248fbc09b4c4aa35238b7c6173829 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/fa2248fbc09b4c4aa35238b7c6173829 2024-12-12T19:33:03,514 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/C of 2c602e7d1968409c889fb12ef89e5146 into fa2248fbc09b4c4aa35238b7c6173829(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
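The compaction entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") reflect the per-store file count crossing the minor-compaction minimum while staying well under the blocking ceiling. As a rough sketch, assuming a reachable cluster and the table name taken from the log, the same thresholds can be read from the configuration and a compaction requested explicitly through the Admin API rather than waiting for the flusher to queue one:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionTrigger {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    int minFilesToCompact  = conf.getInt("hbase.hstore.compaction.min", 3);
    int blockingStoreFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.printf("minor compaction eligible at >= %d files, flushes blocked at %d files%n",
        minFilesToCompact, blockingStoreFiles);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);   // force memstores out to store files, as MemStoreFlusher does above
      admin.compact(table); // queue a minor compaction request for every region of the table
    }
  }
}

admin.compact() only queues the request; the CompactSplit threads seen in this log still decide which files are eligible and perform the actual rewrite under the configured throughput limit.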
2024-12-12T19:33:03,514 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:03,514 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/C, priority=13, startTime=1734031983137; duration=0sec 2024-12-12T19:33:03,514 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:03,514 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:C 2024-12-12T19:33:03,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742000_1176 (size=31255) 2024-12-12T19:33:03,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. as already flushing 2024-12-12T19:33:03,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032043843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,860 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e 2024-12-12T19:33:03,883 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/A of 2c602e7d1968409c889fb12ef89e5146 into 3f1d0ca0cdfc4cc7afa66d3c94272f1e(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:03,883 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:03,883 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/A, priority=13, startTime=1734031983136; duration=0sec 2024-12-12T19:33:03,884 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:03,884 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:A 2024-12-12T19:33:03,960 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/24a8127a2f194084b31ce34776272241 2024-12-12T19:33:03,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:03,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032043958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:03,982 DEBUG [Thread-527 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:52216 2024-12-12T19:33:03,982 DEBUG [Thread-527 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:03,983 DEBUG [Thread-529 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:52216 2024-12-12T19:33:03,983 DEBUG [Thread-529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:03,984 DEBUG [Thread-533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c1ec7ee to 127.0.0.1:52216 2024-12-12T19:33:03,984 DEBUG [Thread-533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:03,989 DEBUG [Thread-531 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:52216 2024-12-12T19:33:03,989 DEBUG [Thread-531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:04,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/8b946f4f0aba4962a0739e55594acb97 is 50, key is test_row_0/B:col10/1734031981593/Put/seqid=0 2024-12-12T19:33:04,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742001_1177 (size=12301) 2024-12-12T19:33:04,075 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=286 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/8b946f4f0aba4962a0739e55594acb97 2024-12-12T19:33:04,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/267918a0887846b3858ce7e7cd82e6e3 is 50, key is test_row_0/C:col10/1734031981593/Put/seqid=0 2024-12-12T19:33:04,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44268 deadline: 1734032044175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:04,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742002_1178 (size=12301) 2024-12-12T19:33:04,189 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/267918a0887846b3858ce7e7cd82e6e3 2024-12-12T19:33:04,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/24a8127a2f194084b31ce34776272241 as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241 2024-12-12T19:33:04,228 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241, entries=150, sequenceid=286, filesize=30.5 K 2024-12-12T19:33:04,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/8b946f4f0aba4962a0739e55594acb97 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/8b946f4f0aba4962a0739e55594acb97 2024-12-12T19:33:04,237 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/8b946f4f0aba4962a0739e55594acb97, entries=150, sequenceid=286, filesize=12.0 K 2024-12-12T19:33:04,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/267918a0887846b3858ce7e7cd82e6e3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/267918a0887846b3858ce7e7cd82e6e3 2024-12-12T19:33:04,250 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/267918a0887846b3858ce7e7cd82e6e3, entries=150, sequenceid=286, filesize=12.0 K 2024-12-12T19:33:04,259 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 2c602e7d1968409c889fb12ef89e5146 in 1055ms, sequenceid=286, compaction requested=false 2024-12-12T19:33:04,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:04,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-12T19:33:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-12T19:33:04,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-12T19:33:04,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.9870 sec 2024-12-12T19:33:04,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 3.9960 sec 2024-12-12T19:33:04,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T19:33:04,404 INFO [Thread-526 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-12T19:33:04,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:04,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:33:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:33:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:33:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:33:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:04,500 DEBUG [Thread-524 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:52216 2024-12-12T19:33:04,501 DEBUG [Thread-524 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:04,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121eeaef432c584c67a759076f9817a573_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:04,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742003_1179 (size=12454) 2024-12-12T19:33:04,615 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:04,656 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121eeaef432c584c67a759076f9817a573_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412121eeaef432c584c67a759076f9817a573_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:04,669 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/b72b377b91fb4a4aaa97b0f88e6ebb57, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:04,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/b72b377b91fb4a4aaa97b0f88e6ebb57 is 175, key is test_row_0/A:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:04,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742004_1180 (size=31255) 2024-12-12T19:33:04,719 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=302, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/b72b377b91fb4a4aaa97b0f88e6ebb57 2024-12-12T19:33:04,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/5a299975228e41fba8b0fc4170015045 is 50, key is test_row_0/B:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:04,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742005_1181 (size=12301) 2024-12-12T19:33:04,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/5a299975228e41fba8b0fc4170015045 2024-12-12T19:33:04,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f7402b9305d249cf97568960f7e13e61 is 50, key is test_row_0/C:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:04,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742006_1182 (size=12301) 2024-12-12T19:33:05,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f7402b9305d249cf97568960f7e13e61 
2024-12-12T19:33:05,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/b72b377b91fb4a4aaa97b0f88e6ebb57 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57 2024-12-12T19:33:05,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57, entries=150, sequenceid=302, filesize=30.5 K 2024-12-12T19:33:05,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/5a299975228e41fba8b0fc4170015045 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5a299975228e41fba8b0fc4170015045 2024-12-12T19:33:05,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5a299975228e41fba8b0fc4170015045, entries=150, sequenceid=302, filesize=12.0 K 2024-12-12T19:33:05,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/f7402b9305d249cf97568960f7e13e61 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f7402b9305d249cf97568960f7e13e61 2024-12-12T19:33:05,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f7402b9305d249cf97568960f7e13e61, entries=150, sequenceid=302, filesize=12.0 K 2024-12-12T19:33:05,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 2c602e7d1968409c889fb12ef89e5146 in 796ms, sequenceid=302, compaction requested=true 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:05,295 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
2c602e7d1968409c889fb12ef89e5146:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:05,295 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c602e7d1968409c889fb12ef89e5146:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:05,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:05,297 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94329 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:05,297 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:05,297 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/B is initiating minor compaction (all files) 2024-12-12T19:33:05,297 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/A is initiating minor compaction (all files) 2024-12-12T19:33:05,297 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/B in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:05,297 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/A in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:05,297 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/565004c141964dc99cbce80bf1f26e12, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/8b946f4f0aba4962a0739e55594acb97, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5a299975228e41fba8b0fc4170015045] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=36.6 K 2024-12-12T19:33:05,297 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=92.1 K 2024-12-12T19:33:05,297 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:05,297 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57] 2024-12-12T19:33:05,298 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 565004c141964dc99cbce80bf1f26e12, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734031979221 2024-12-12T19:33:05,298 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f1d0ca0cdfc4cc7afa66d3c94272f1e, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734031979221 2024-12-12T19:33:05,298 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b946f4f0aba4962a0739e55594acb97, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734031981534 2024-12-12T19:33:05,298 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24a8127a2f194084b31ce34776272241, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734031981534 2024-12-12T19:33:05,299 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b72b377b91fb4a4aaa97b0f88e6ebb57, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1734031983767 2024-12-12T19:33:05,299 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a299975228e41fba8b0fc4170015045, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1734031983767 2024-12-12T19:33:05,313 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#B#compaction#144 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:05,314 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/34ea49ac49c4441f8426ddd5419515aa is 50, key is test_row_0/B:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:05,340 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742007_1183 (size=13017) 2024-12-12T19:33:05,351 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212cfc821686bb84b1ba88f73e3ee7f38f4_2c602e7d1968409c889fb12ef89e5146 store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:05,376 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212cfc821686bb84b1ba88f73e3ee7f38f4_2c602e7d1968409c889fb12ef89e5146, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:05,376 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cfc821686bb84b1ba88f73e3ee7f38f4_2c602e7d1968409c889fb12ef89e5146 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:05,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742008_1184 (size=4469) 2024-12-12T19:33:05,425 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#A#compaction#145 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:05,426 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/529d6d754c19440d9ace06eff7e22369 is 175, key is test_row_0/A:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:05,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742009_1185 (size=31971) 2024-12-12T19:33:05,505 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/529d6d754c19440d9ace06eff7e22369 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/529d6d754c19440d9ace06eff7e22369 2024-12-12T19:33:05,535 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/A of 2c602e7d1968409c889fb12ef89e5146 into 529d6d754c19440d9ace06eff7e22369(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:05,535 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:05,535 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/A, priority=13, startTime=1734031985295; duration=0sec 2024-12-12T19:33:05,536 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:05,536 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:A 2024-12-12T19:33:05,537 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:05,544 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:05,544 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 2c602e7d1968409c889fb12ef89e5146/C is initiating minor compaction (all files) 2024-12-12T19:33:05,544 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c602e7d1968409c889fb12ef89e5146/C in TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:05,544 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/fa2248fbc09b4c4aa35238b7c6173829, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/267918a0887846b3858ce7e7cd82e6e3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f7402b9305d249cf97568960f7e13e61] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp, totalSize=36.6 K 2024-12-12T19:33:05,550 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa2248fbc09b4c4aa35238b7c6173829, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734031979221 2024-12-12T19:33:05,551 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 267918a0887846b3858ce7e7cd82e6e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734031981534 2024-12-12T19:33:05,551 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7402b9305d249cf97568960f7e13e61, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1734031983767 2024-12-12T19:33:05,579 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c602e7d1968409c889fb12ef89e5146#C#compaction#146 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:05,579 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/da2228c978224241a3b7951be40e0a7f is 50, key is test_row_0/C:col10/1734031983841/Put/seqid=0 2024-12-12T19:33:05,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742010_1186 (size=13017) 2024-12-12T19:33:05,647 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/da2228c978224241a3b7951be40e0a7f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/da2228c978224241a3b7951be40e0a7f 2024-12-12T19:33:05,656 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/C of 2c602e7d1968409c889fb12ef89e5146 into da2228c978224241a3b7951be40e0a7f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:05,657 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/C, priority=13, startTime=1734031985295; duration=0sec 2024-12-12T19:33:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:C 2024-12-12T19:33:05,777 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/34ea49ac49c4441f8426ddd5419515aa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/34ea49ac49c4441f8426ddd5419515aa 2024-12-12T19:33:05,825 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c602e7d1968409c889fb12ef89e5146/B of 2c602e7d1968409c889fb12ef89e5146 into 34ea49ac49c4441f8426ddd5419515aa(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:05,825 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:05,825 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146., storeName=2c602e7d1968409c889fb12ef89e5146/B, priority=13, startTime=1734031985295; duration=0sec 2024-12-12T19:33:05,825 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:05,825 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c602e7d1968409c889fb12ef89e5146:B 2024-12-12T19:33:08,656 DEBUG [master/4c9c438b6eeb:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2c541955553f42ed357f6055374132eb changed from -1.0 to 0.0, refreshing cache 2024-12-12T19:33:13,105 DEBUG [Thread-520 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:52216 2024-12-12T19:33:13,105 DEBUG [Thread-520 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:13,147 DEBUG [Thread-522 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:52216 2024-12-12T19:33:13,148 DEBUG [Thread-522 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:13,162 DEBUG [Thread-518 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e3a4420 to 127.0.0.1:52216 2024-12-12T19:33:13,162 DEBUG [Thread-518 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-12T19:33:13,201 DEBUG [Thread-516 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:52216 2024-12-12T19:33:13,201 DEBUG [Thread-516 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 6 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 193 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3073 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2951 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1373 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4119 rows 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1392 2024-12-12T19:33:13,202 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4176 rows 2024-12-12T19:33:13,202 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:33:13,202 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26401a5f to 127.0.0.1:52216 2024-12-12T19:33:13,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:13,204 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T19:33:13,205 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T19:33:13,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:13,218 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031993217"}]},"ts":"1734031993217"} 2024-12-12T19:33:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:13,219 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T19:33:13,256 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T19:33:13,267 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:33:13,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, UNASSIGN}] 2024-12-12T19:33:13,270 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, UNASSIGN 2024-12-12T19:33:13,272 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:13,274 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:33:13,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; CloseRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:33:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:13,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:13,435 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(124): Close 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:13,435 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:33:13,435 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1681): Closing 2c602e7d1968409c889fb12ef89e5146, disabling compactions & flushes 2024-12-12T19:33:13,435 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:13,435 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 2024-12-12T19:33:13,435 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. after waiting 0 ms 2024-12-12T19:33:13,435 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
2024-12-12T19:33:13,435 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(2837): Flushing 2c602e7d1968409c889fb12ef89e5146 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T19:33:13,435 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=A 2024-12-12T19:33:13,436 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:13,436 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=B 2024-12-12T19:33:13,436 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:13,436 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c602e7d1968409c889fb12ef89e5146, store=C 2024-12-12T19:33:13,436 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:13,486 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128041dccf25084df9b2f898e06c365a85_2c602e7d1968409c889fb12ef89e5146 is 50, key is test_row_0/A:col10/1734031993196/Put/seqid=0 2024-12-12T19:33:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742011_1187 (size=9914) 2024-12-12T19:33:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:13,952 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:13,960 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128041dccf25084df9b2f898e06c365a85_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128041dccf25084df9b2f898e06c365a85_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:13,962 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/f506f694badf484ea7bf9e408104d7e1, store: [table=TestAcidGuarantees family=A region=2c602e7d1968409c889fb12ef89e5146] 2024-12-12T19:33:13,962 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/f506f694badf484ea7bf9e408104d7e1 is 175, key is test_row_0/A:col10/1734031993196/Put/seqid=0 2024-12-12T19:33:13,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742012_1188 (size=22561) 2024-12-12T19:33:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:14,386 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=312, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/f506f694badf484ea7bf9e408104d7e1 2024-12-12T19:33:14,415 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/b71d1a24f3bc415abed830883962ff0f is 50, key is test_row_0/B:col10/1734031993196/Put/seqid=0 2024-12-12T19:33:14,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742013_1189 (size=9857) 2024-12-12T19:33:14,856 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/b71d1a24f3bc415abed830883962ff0f 2024-12-12T19:33:14,866 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a8211d717bc84e16aefdaaca7fde7940 is 50, key is test_row_0/C:col10/1734031993196/Put/seqid=0 2024-12-12T19:33:14,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742014_1190 (size=9857) 2024-12-12T19:33:15,273 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a8211d717bc84e16aefdaaca7fde7940 2024-12-12T19:33:15,293 DEBUG 
[RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/A/f506f694badf484ea7bf9e408104d7e1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/f506f694badf484ea7bf9e408104d7e1 2024-12-12T19:33:15,309 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/f506f694badf484ea7bf9e408104d7e1, entries=100, sequenceid=312, filesize=22.0 K 2024-12-12T19:33:15,311 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/B/b71d1a24f3bc415abed830883962ff0f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/b71d1a24f3bc415abed830883962ff0f 2024-12-12T19:33:15,317 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/b71d1a24f3bc415abed830883962ff0f, entries=100, sequenceid=312, filesize=9.6 K 2024-12-12T19:33:15,319 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/.tmp/C/a8211d717bc84e16aefdaaca7fde7940 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a8211d717bc84e16aefdaaca7fde7940 2024-12-12T19:33:15,328 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a8211d717bc84e16aefdaaca7fde7940, entries=100, sequenceid=312, filesize=9.6 K 2024-12-12T19:33:15,329 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 2c602e7d1968409c889fb12ef89e5146 in 1894ms, sequenceid=312, compaction requested=false 2024-12-12T19:33:15,330 DEBUG [StoreCloser-TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ca2f78e1dc084ab5b29953f72fb31ed1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/93cbe951f33a4533913cf7e876b9bc8c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3d3fb9eff0344b608c370413542c955a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/c460bb0c1fca4676998bdf81595fc357, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57] to archive 2024-12-12T19:33:15,331 DEBUG [StoreCloser-TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:33:15,338 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/bce9068fcf9b44ad98977f2f0e7bbc3d 2024-12-12T19:33:15,338 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/91e35e09cd924ffa8d79216341fcee8e 2024-12-12T19:33:15,342 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/99608738123b4a11a5887f145f01b50a 2024-12-12T19:33:15,346 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e42aae612ebe4db4bb8b523cb9b3cf77 2024-12-12T19:33:15,351 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/a73bc7b5df06460e931085dc2ea02af1 2024-12-12T19:33:15,353 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/0287589c6a734ba3bca07f9cbaca15e5 2024-12-12T19:33:15,353 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/93cbe951f33a4533913cf7e876b9bc8c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/93cbe951f33a4533913cf7e876b9bc8c 2024-12-12T19:33:15,354 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/e21481621ce642c884b36ea73248af5c 2024-12-12T19:33:15,354 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ea6ea2b9f2654ae8ab5b5f1c270f6e4e 2024-12-12T19:33:15,354 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/43ed2f36cf934b7eab03e9720fd053af 2024-12-12T19:33:15,356 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3d3fb9eff0344b608c370413542c955a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3d3fb9eff0344b608c370413542c955a 2024-12-12T19:33:15,356 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ada4799cdddb41d987ce09814a1d485b 2024-12-12T19:33:15,356 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/6b428d742dea478d9d963cf1c40533df 2024-12-12T19:33:15,357 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/55cc2e1fd0cd489a8dca821f8e2cbf50 2024-12-12T19:33:15,357 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/c460bb0c1fca4676998bdf81595fc357 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/c460bb0c1fca4676998bdf81595fc357 2024-12-12T19:33:15,359 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/3f1d0ca0cdfc4cc7afa66d3c94272f1e 2024-12-12T19:33:15,359 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/5e2b3efed53c4ebe921987d412dfb135 2024-12-12T19:33:15,359 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/78c82177381b4af2bbb858a2cdf3fe9a 2024-12-12T19:33:15,359 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ca2f78e1dc084ab5b29953f72fb31ed1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/ca2f78e1dc084ab5b29953f72fb31ed1 2024-12-12T19:33:15,359 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/b72b377b91fb4a4aaa97b0f88e6ebb57 2024-12-12T19:33:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:15,360 DEBUG [HFileArchiver-12 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/24a8127a2f194084b31ce34776272241 2024-12-12T19:33:15,368 DEBUG [StoreCloser-TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/06c29e6bbea34ad8bb844967201e21f9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/d609d8a023df4093b835526bfb7917b1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/bd319ac88ee24884b3ae886e245bb89a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c58d6c8fcc96437e9ce3f2e40c1900b7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/856472a57c2f4caaadcfed9d73835a56, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dfd4ba19429e4c04a5261a92ddcce1dd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/20f786bcfa964bb9bc468d943eeafe93, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5626e4ed02ae410b9f0251ea4bac30f5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/024cfc2629ea47818c0fb23e629c7e9e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/f64f1b5dc9e14c6a85bf99bfade53bf7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/945e42ced943495390e4286ef8d8f545, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/7da79363171640b6b900d21b931eb9f3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/e34a6171f04841f5b80d99028842d23a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/91d5bbd693bb44d2a92d4a9122bfe9da, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/9713e6fe326f45f89b78e7395170cf6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dee1aa39c673412b81017648c3ac3584, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/486360422afb44469aaec93061def285, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/565004c141964dc99cbce80bf1f26e12, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c0e33dfb36d64d399568049fb9c40582, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/8b946f4f0aba4962a0739e55594acb97, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5a299975228e41fba8b0fc4170015045] to archive 2024-12-12T19:33:15,369 DEBUG [StoreCloser-TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:33:15,372 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/06c29e6bbea34ad8bb844967201e21f9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/06c29e6bbea34ad8bb844967201e21f9 2024-12-12T19:33:15,373 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c58d6c8fcc96437e9ce3f2e40c1900b7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c58d6c8fcc96437e9ce3f2e40c1900b7 2024-12-12T19:33:15,374 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/bd319ac88ee24884b3ae886e245bb89a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/bd319ac88ee24884b3ae886e245bb89a 2024-12-12T19:33:15,374 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/d609d8a023df4093b835526bfb7917b1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/d609d8a023df4093b835526bfb7917b1 2024-12-12T19:33:15,375 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/20f786bcfa964bb9bc468d943eeafe93 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/20f786bcfa964bb9bc468d943eeafe93 
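
Note: the entries above show the close-time flush of region 2c602e7d1968409c889fb12ef89e5146 (families A, B and C committed from .tmp into the store directories, plus the MOB file rename) followed by the StoreCloser handing compacted store files to HFileArchiver, which moves each file from data/default/<table>/<region>/<family>/ to the same relative location under archive/data/. Below is a minimal sketch of that path mirroring using the Hadoop FileSystem API; the class and method names, and the assumption that the store file sits under the data root, are illustrative and not HBase's internal archiver implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: mirrors the data-root -> archive-root layout visible in the log above.
public class ArchivePathSketch {

  // e.g. dataRoot    = .../482833ff-e16a-4c62-c863-a898fd9a1b98/data
  //      archiveRoot = .../482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data
  static Path toArchivePath(Path dataRoot, Path archiveRoot, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(dataRoot.toUri().getPath().length() + 1);
    return new Path(archiveRoot, relative);     // keeps default/<table>/<region>/<family>/<file>
  }

  static void archive(FileSystem fs, Path dataRoot, Path archiveRoot, Path storeFile)
      throws IOException {
    Path target = toArchivePath(dataRoot, archiveRoot, storeFile);
    fs.mkdirs(target.getParent());              // create archive/<table>/<region>/<family> if needed
    if (!fs.rename(storeFile, target)) {        // move rather than delete, as the log shows
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}
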
2024-12-12T19:33:15,375 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/024cfc2629ea47818c0fb23e629c7e9e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/024cfc2629ea47818c0fb23e629c7e9e 2024-12-12T19:33:15,375 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5626e4ed02ae410b9f0251ea4bac30f5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5626e4ed02ae410b9f0251ea4bac30f5 2024-12-12T19:33:15,375 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/856472a57c2f4caaadcfed9d73835a56 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/856472a57c2f4caaadcfed9d73835a56 2024-12-12T19:33:15,376 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/f64f1b5dc9e14c6a85bf99bfade53bf7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/f64f1b5dc9e14c6a85bf99bfade53bf7 2024-12-12T19:33:15,376 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/7da79363171640b6b900d21b931eb9f3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/7da79363171640b6b900d21b931eb9f3 2024-12-12T19:33:15,378 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dfd4ba19429e4c04a5261a92ddcce1dd to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dfd4ba19429e4c04a5261a92ddcce1dd 2024-12-12T19:33:15,379 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/e34a6171f04841f5b80d99028842d23a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/e34a6171f04841f5b80d99028842d23a 2024-12-12T19:33:15,380 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/91d5bbd693bb44d2a92d4a9122bfe9da to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/91d5bbd693bb44d2a92d4a9122bfe9da 2024-12-12T19:33:15,380 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dee1aa39c673412b81017648c3ac3584 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/dee1aa39c673412b81017648c3ac3584 2024-12-12T19:33:15,380 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/9713e6fe326f45f89b78e7395170cf6f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/9713e6fe326f45f89b78e7395170cf6f 2024-12-12T19:33:15,380 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/945e42ced943495390e4286ef8d8f545 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/945e42ced943495390e4286ef8d8f545 2024-12-12T19:33:15,381 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/565004c141964dc99cbce80bf1f26e12 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/565004c141964dc99cbce80bf1f26e12 2024-12-12T19:33:15,384 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/486360422afb44469aaec93061def285 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/486360422afb44469aaec93061def285 2024-12-12T19:33:15,385 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/8b946f4f0aba4962a0739e55594acb97 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/8b946f4f0aba4962a0739e55594acb97 2024-12-12T19:33:15,385 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c0e33dfb36d64d399568049fb9c40582 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/c0e33dfb36d64d399568049fb9c40582 2024-12-12T19:33:15,385 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5a299975228e41fba8b0fc4170015045 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/5a299975228e41fba8b0fc4170015045 2024-12-12T19:33:15,386 DEBUG [StoreCloser-TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0e29d29a60b34f8cbaf00c63c4f6e291, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f710db0f8b2a4bffab8e9ad326b35b5c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b4aeaab8938644d18c8e9a6955c56a11, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/cfdaf60449f64f59a78d36bc7fab5b1a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b642c18affd9420bac19fe1eb39ad308, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0c20e14fa3224017abd7fbd33cb1260a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/e1ecc442f2984271a10285a1344b3d1f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a43bbcbbe70b4ba0af55f62df3981113, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a491b5fcfae44d1994b28b41ff7d5cbd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a425942f4a0d4572948fc532bf350f9b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/70a379f27c314012b5162fbd8a52ddc6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/9316e9f2dbfc43e6a7671a5a902e43ec, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a70ff57801ce473e8d291b23bece196c, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/247f6e0728ed477cac6244980a8c6933, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/8a6fe4a29569480da14465b9640d4828, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/063cb69611204ef9a37a0929bc33bb91, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f54acd49c60a4fe9bd6fe7f874f062cc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/fa2248fbc09b4c4aa35238b7c6173829, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/1af9255524d043be945f502e0030409f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/267918a0887846b3858ce7e7cd82e6e3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f7402b9305d249cf97568960f7e13e61] to archive 2024-12-12T19:33:15,389 DEBUG [StoreCloser-TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:33:15,392 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/cfdaf60449f64f59a78d36bc7fab5b1a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/cfdaf60449f64f59a78d36bc7fab5b1a 2024-12-12T19:33:15,392 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f710db0f8b2a4bffab8e9ad326b35b5c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f710db0f8b2a4bffab8e9ad326b35b5c 2024-12-12T19:33:15,392 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0c20e14fa3224017abd7fbd33cb1260a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0c20e14fa3224017abd7fbd33cb1260a 2024-12-12T19:33:15,393 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b642c18affd9420bac19fe1eb39ad308 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b642c18affd9420bac19fe1eb39ad308 2024-12-12T19:33:15,393 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b4aeaab8938644d18c8e9a6955c56a11 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/b4aeaab8938644d18c8e9a6955c56a11 2024-12-12T19:33:15,394 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0e29d29a60b34f8cbaf00c63c4f6e291 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/0e29d29a60b34f8cbaf00c63c4f6e291 2024-12-12T19:33:15,394 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a491b5fcfae44d1994b28b41ff7d5cbd to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a491b5fcfae44d1994b28b41ff7d5cbd 2024-12-12T19:33:15,395 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/e1ecc442f2984271a10285a1344b3d1f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/e1ecc442f2984271a10285a1344b3d1f 2024-12-12T19:33:15,395 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a43bbcbbe70b4ba0af55f62df3981113 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a43bbcbbe70b4ba0af55f62df3981113 2024-12-12T19:33:15,395 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/70a379f27c314012b5162fbd8a52ddc6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/70a379f27c314012b5162fbd8a52ddc6 2024-12-12T19:33:15,396 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a425942f4a0d4572948fc532bf350f9b to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a425942f4a0d4572948fc532bf350f9b 2024-12-12T19:33:15,396 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a70ff57801ce473e8d291b23bece196c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a70ff57801ce473e8d291b23bece196c 2024-12-12T19:33:15,400 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/063cb69611204ef9a37a0929bc33bb91 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/063cb69611204ef9a37a0929bc33bb91 2024-12-12T19:33:15,400 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/8a6fe4a29569480da14465b9640d4828 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/8a6fe4a29569480da14465b9640d4828 2024-12-12T19:33:15,400 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/9316e9f2dbfc43e6a7671a5a902e43ec to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/9316e9f2dbfc43e6a7671a5a902e43ec 2024-12-12T19:33:15,401 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/247f6e0728ed477cac6244980a8c6933 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/247f6e0728ed477cac6244980a8c6933 2024-12-12T19:33:15,401 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/fa2248fbc09b4c4aa35238b7c6173829 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/fa2248fbc09b4c4aa35238b7c6173829 2024-12-12T19:33:15,401 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f54acd49c60a4fe9bd6fe7f874f062cc to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f54acd49c60a4fe9bd6fe7f874f062cc 2024-12-12T19:33:15,401 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/267918a0887846b3858ce7e7cd82e6e3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/267918a0887846b3858ce7e7cd82e6e3 2024-12-12T19:33:15,401 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f7402b9305d249cf97568960f7e13e61 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/f7402b9305d249cf97568960f7e13e61 2024-12-12T19:33:15,405 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/1af9255524d043be945f502e0030409f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/1af9255524d043be945f502e0030409f 2024-12-12T19:33:15,421 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/recovered.edits/315.seqid, newMaxSeqId=315, maxSeqId=4 2024-12-12T19:33:15,423 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146. 
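
Note: the close above (recovered.edits/315.seqid written, region 2c602e7d1968409c889fb12ef89e5146 closed) is the final region-level step of the DisableTableProcedure (pid=55); the entries that follow show the client observing the DISABLE complete and immediately requesting the table delete (pid=59). A minimal sketch of how a client drives that DISABLE -> DELETE sequence through the public Admin API; only the table name is taken from the log, the configuration and connection setup are assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: the DISABLE -> DELETE teardown a client issues against the master.
public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // DisableTableProcedure, pid=55 in the log above
      }
      admin.deleteTable(table);      // DeleteTableProcedure, pid=59 in the log below
    }
  }
}

Both calls block until the corresponding master procedure finishes, which matches the repeated "Checking to see if procedure is done pid=55" entries above and the "procId: 55 completed" confirmation below.
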
2024-12-12T19:33:15,423 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1635): Region close journal for 2c602e7d1968409c889fb12ef89e5146: 2024-12-12T19:33:15,425 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(170): Closed 2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:15,426 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=2c602e7d1968409c889fb12ef89e5146, regionState=CLOSED 2024-12-12T19:33:15,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-12T19:33:15,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; CloseRegionProcedure 2c602e7d1968409c889fb12ef89e5146, server=4c9c438b6eeb,42689,1734031923038 in 2.1530 sec 2024-12-12T19:33:15,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-12T19:33:15,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c602e7d1968409c889fb12ef89e5146, UNASSIGN in 2.1610 sec 2024-12-12T19:33:15,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-12T19:33:15,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.1660 sec 2024-12-12T19:33:15,436 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031995436"}]},"ts":"1734031995436"} 2024-12-12T19:33:15,439 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T19:33:15,667 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T19:33:15,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.4630 sec 2024-12-12T19:33:17,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T19:33:17,363 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-12T19:33:17,364 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T19:33:17,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:17,376 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:17,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T19:33:17,377 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=59, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:17,379 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,392 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/recovered.edits] 2024-12-12T19:33:17,402 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/f506f694badf484ea7bf9e408104d7e1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/f506f694badf484ea7bf9e408104d7e1 2024-12-12T19:33:17,402 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/529d6d754c19440d9ace06eff7e22369 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/A/529d6d754c19440d9ace06eff7e22369 2024-12-12T19:33:17,420 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/34ea49ac49c4441f8426ddd5419515aa to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/34ea49ac49c4441f8426ddd5419515aa 2024-12-12T19:33:17,420 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/b71d1a24f3bc415abed830883962ff0f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/B/b71d1a24f3bc415abed830883962ff0f 2024-12-12T19:33:17,443 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a8211d717bc84e16aefdaaca7fde7940 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/a8211d717bc84e16aefdaaca7fde7940 
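
Note: the delete does not remove store files outright; around this point the DeleteTableProcedure archives the region's remaining A/B/C store files and recovered.edits, removes the now-empty region directory, and in the entries that follow archives the MOB files kept under mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3. A small sketch, assuming the Hadoop FileSystem API, of how one could confirm after the procedure finishes that the live table directory is gone while the archived copies remain; the root URI is copied from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: post-delete check against the test HDFS instance named in the log.
public class VerifyArchiveSketch {
  public static void main(String[] args) throws Exception {
    String root = "hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98";
    Path data = new Path(root + "/data/default/TestAcidGuarantees");
    Path archive = new Path(root + "/archive/data/default/TestAcidGuarantees");
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(data.toUri(), conf)) {
      System.out.println("live table dir exists: " + fs.exists(data));  // expected false once pid=59 finishes
      for (FileStatus region : fs.listStatus(archive)) {                 // archived region dirs remain
        System.out.println("archived: " + region.getPath());
      }
    }
  }
}
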
2024-12-12T19:33:17,443 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/da2228c978224241a3b7951be40e0a7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/C/da2228c978224241a3b7951be40e0a7f 2024-12-12T19:33:17,462 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/recovered.edits/315.seqid to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146/recovered.edits/315.seqid 2024-12-12T19:33:17,468 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,469 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T19:33:17,471 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T19:33:17,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T19:33:17,479 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T19:33:17,506 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121203c7655b0e0d4a288f04006a39876dd2_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121203c7655b0e0d4a288f04006a39876dd2_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,506 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212445eb9f37dd74b9d912d4f061168533b_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212445eb9f37dd74b9d912d4f061168533b_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,507 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412121eeaef432c584c67a759076f9817a573_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412121eeaef432c584c67a759076f9817a573_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,507 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123313e00cb65045bb81e54a2032ca5271_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123313e00cb65045bb81e54a2032ca5271_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,507 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124df5fef78bcb430b9d0ac4d4f0c975d4_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124df5fef78bcb430b9d0ac4d4f0c975d4_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,508 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212524e67b6b29b4761b8703eada2ad918a_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212524e67b6b29b4761b8703eada2ad918a_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,509 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128041dccf25084df9b2f898e06c365a85_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128041dccf25084df9b2f898e06c365a85_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,509 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212806997cd89eb4c54b0f7f43adc9bec5a_2c602e7d1968409c889fb12ef89e5146 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212806997cd89eb4c54b0f7f43adc9bec5a_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,512 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121289cde2eff09144efbdc793443d73c815_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121289cde2eff09144efbdc793443d73c815_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,512 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128bee1886c16944c4b37ec5f3ffa694be_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128bee1886c16944c4b37ec5f3ffa694be_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,513 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a2e4ef63b9ee4f87939c596ed2bc4fe1_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a2e4ef63b9ee4f87939c596ed2bc4fe1_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,514 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121299143ae49cb346ffa444e150d605ed39_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121299143ae49cb346ffa444e150d605ed39_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,514 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a086b330102546a9ac074a21aeabc9cd_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a086b330102546a9ac074a21aeabc9cd_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,514 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a404ddfb3ce7481da117b9a92ae7569d_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a404ddfb3ce7481da117b9a92ae7569d_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,515 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a1dae4a6ece24e758d5945f4ddccaf6f_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a1dae4a6ece24e758d5945f4ddccaf6f_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,515 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a5b1e6aeec4f4f50a04856c11c508c08_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a5b1e6aeec4f4f50a04856c11c508c08_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,528 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f53ed769ca0244cf9c9cff03858ec59e_2c602e7d1968409c889fb12ef89e5146 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f53ed769ca0244cf9c9cff03858ec59e_2c602e7d1968409c889fb12ef89e5146 2024-12-12T19:33:17,534 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T19:33:17,540 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=59, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:17,556 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T19:33:17,572 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T19:33:17,574 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=59, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:17,575 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-12T19:33:17,575 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734031997575"}]},"ts":"9223372036854775807"} 2024-12-12T19:33:17,589 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T19:33:17,590 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2c602e7d1968409c889fb12ef89e5146, NAME => 'TestAcidGuarantees,,1734031960205.2c602e7d1968409c889fb12ef89e5146.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T19:33:17,590 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T19:33:17,590 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734031997590"}]},"ts":"9223372036854775807"} 2024-12-12T19:33:17,597 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T19:33:17,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T19:33:17,897 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=59, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:17,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 533 msec 2024-12-12T19:33:17,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T19:33:17,980 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-12T19:33:18,003 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=243 (was 244), OpenFileDescriptor=448 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1471 (was 1254) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6226 (was 9938) 2024-12-12T19:33:18,019 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=243, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=1471, ProcessCount=11, AvailableMemoryMB=6225 2024-12-12T19:33:18,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T19:33:18,021 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:33:18,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:18,023 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:33:18,024 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:18,025 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:33:18,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 60 2024-12-12T19:33:18,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-12T19:33:18,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742015_1191 (size=963) 2024-12-12T19:33:18,100 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:33:18,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-12T19:33:18,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742016_1192 (size=53) 2024-12-12T19:33:18,179 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:33:18,179 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f61d5e1e16d46799d7435009ba841107, disabling compactions & flushes 2024-12-12T19:33:18,179 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:18,179 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:18,179 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. after waiting 0 ms 2024-12-12T19:33:18,179 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:18,179 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:18,179 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:18,183 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:33:18,184 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734031998183"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734031998183"}]},"ts":"1734031998183"} 2024-12-12T19:33:18,193 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
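The create request logged above (table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', families A/B/C with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB) corresponds to a client-side Admin call. A minimal sketch of issuing an equivalent request, assuming the standard HBase 2.x Java client; the table and family names come from the log, while the class name, connection setup, and variable names are illustrative assumptions rather than the actual test code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        // Descriptor equivalent to the one logged by HMaster$4(2389): three families,
        // one version each, ROW bloom filters, 64 KB blocks, and the ADAPTIVE
        // in-memory compaction policy set as a table-level attribute.
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits the request that the master runs as a CreateTableProcedure (pid=60 above).
          admin.createTable(tdb.build());
        }
      }
    }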
2024-12-12T19:33:18,200 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:33:18,200 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031998200"}]},"ts":"1734031998200"} 2024-12-12T19:33:18,210 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T19:33:18,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-12T19:33:18,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-12T19:33:18,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, ASSIGN}] 2024-12-12T19:33:18,939 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, ASSIGN 2024-12-12T19:33:18,944 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:33:19,098 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=f61d5e1e16d46799d7435009ba841107, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:19,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; OpenRegionProcedure f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:33:19,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-12T19:33:19,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:19,311 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:19,311 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7285): Opening region: {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:33:19,312 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,312 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:33:19,312 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7327): checking encryption for f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,312 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7330): checking classloading for f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,327 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,343 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:19,344 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f61d5e1e16d46799d7435009ba841107 columnFamilyName A 2024-12-12T19:33:19,344 DEBUG [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:19,355 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.HStore(327): Store=f61d5e1e16d46799d7435009ba841107/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:19,355 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,371 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:19,371 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f61d5e1e16d46799d7435009ba841107 columnFamilyName B 2024-12-12T19:33:19,372 DEBUG [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:19,374 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.HStore(327): Store=f61d5e1e16d46799d7435009ba841107/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:19,375 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,380 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:19,380 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f61d5e1e16d46799d7435009ba841107 columnFamilyName C 2024-12-12T19:33:19,380 DEBUG [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:19,383 INFO [StoreOpener-f61d5e1e16d46799d7435009ba841107-1 {}] regionserver.HStore(327): Store=f61d5e1e16d46799d7435009ba841107/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:19,383 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:19,391 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,395 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,407 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:33:19,423 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1085): writing seq id for f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:19,450 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:33:19,452 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1102): Opened f61d5e1e16d46799d7435009ba841107; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67058834, jitterRate=-7.455050945281982E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:33:19,453 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1001): Region open journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:19,463 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., pid=62, masterSystemTime=1734031999283 2024-12-12T19:33:19,468 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=f61d5e1e16d46799d7435009ba841107, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:19,469 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:19,469 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
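The StoreOpener entries above show each family of f61d5e1e16d46799d7435009ba841107 backed by a CompactingMemStore with compactor=ADAPTIVE, picked up here from the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same policy can also be requested per column family instead of per table; a minimal sketch under that assumption, using the HBase 2.x ColumnFamilyDescriptorBuilder API (class and method names below are illustrative, not from the test):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilyDescriptor {
      // Request ADAPTIVE in-memory compaction for a single family, rather than via
      // the table-level 'hbase.hregion.compacting.memstore.type' attribute used above.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }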
2024-12-12T19:33:19,473 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40199 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=4c9c438b6eeb,42689,1734031923038, table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T19:33:19,484 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-12T19:33:19,484 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; OpenRegionProcedure f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 in 359 msec 2024-12-12T19:33:19,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-12-12T19:33:19,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, ASSIGN in 547 msec 2024-12-12T19:33:19,488 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:33:19,488 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734031999488"}]},"ts":"1734031999488"} 2024-12-12T19:33:19,497 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T19:33:19,739 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:33:19,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.7180 sec 2024-12-12T19:33:20,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-12T19:33:20,149 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 60 completed 2024-12-12T19:33:20,151 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-12-12T19:33:20,448 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:20,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:20,473 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:20,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=MasterService, sasl=false 2024-12-12T19:33:20,492 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:33:20,495 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-12-12T19:33:20,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:20,952 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-12-12T19:33:20,968 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T19:33:21,368 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:21,369 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-12-12T19:33:21,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:21,761 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-12-12T19:33:22,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:22,059 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-12-12T19:33:22,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:22,316 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 
127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-12-12T19:33:22,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:22,529 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328852db to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1730a60f 2024-12-12T19:33:22,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3677bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:22,755 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b9e2976 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@598cfed4 2024-12-12T19:33:22,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521aad6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:22,988 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56e9a678 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68ad882f 2024-12-12T19:33:23,224 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f5b2180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:23,224 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f7f772a to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b976e1a 2024-12-12T19:33:23,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1df61dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:23,475 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:23,476 DEBUG [hconnection-0x7fb9ec8a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,477 DEBUG [hconnection-0x19efb204-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,477 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37334, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,478 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-12T19:33:23,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T19:33:23,482 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:23,483 DEBUG [hconnection-0x16b5524-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,483 DEBUG [hconnection-0x46b42b7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,483 DEBUG [hconnection-0x3b50faee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,485 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,486 DEBUG [hconnection-0x539e15d1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,486 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:23,486 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37356, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:23,488 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,488 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,489 DEBUG [hconnection-0x7cc5315f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,492 DEBUG [hconnection-0x5cf8acd5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,495 DEBUG [hconnection-0x5a6fbe4a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,495 INFO 
[RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,495 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,495 DEBUG [hconnection-0x39dcce67-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:23,497 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37392, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,499 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:23,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:23,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:33:23,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:23,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:23,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:23,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:23,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:23,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:23,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/2ae32f170e304bce8c6aabed2fead8b2 is 50, key is test_row_0/A:col10/1734032003498/Put/seqid=0 2024-12-12T19:33:23,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032063554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032063555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032063562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032063564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032063565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742017_1193 (size=12001) 2024-12-12T19:33:23,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T19:33:23,643 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T19:33:23,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:23,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:23,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:23,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032063666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032063667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032063670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032063671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032063671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T19:33:23,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T19:33:23,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:23,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:23,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:23,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032063871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032063872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032063873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032063879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:23,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032063883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:23,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T19:33:23,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:23,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:23,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:23,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:23,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/2ae32f170e304bce8c6aabed2fead8b2 2024-12-12T19:33:24,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/b1c834c713504768ae3713fffc6543c2 is 50, key is test_row_0/B:col10/1734032003498/Put/seqid=0 2024-12-12T19:33:24,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742018_1194 (size=12001) 2024-12-12T19:33:24,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T19:33:24,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/b1c834c713504768ae3713fffc6543c2 2024-12-12T19:33:24,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T19:33:24,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:24,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:24,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:24,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:24,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/d45437a1c9534364861fa7c63198c103 is 50, key is test_row_0/C:col10/1734032003498/Put/seqid=0 2024-12-12T19:33:24,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742019_1195 (size=12001) 2024-12-12T19:33:24,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/d45437a1c9534364861fa7c63198c103 2024-12-12T19:33:24,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/2ae32f170e304bce8c6aabed2fead8b2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/2ae32f170e304bce8c6aabed2fead8b2 2024-12-12T19:33:24,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032064173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032064176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032064176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/2ae32f170e304bce8c6aabed2fead8b2, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T19:33:24,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/b1c834c713504768ae3713fffc6543c2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/b1c834c713504768ae3713fffc6543c2 2024-12-12T19:33:24,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/b1c834c713504768ae3713fffc6543c2, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T19:33:24,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/d45437a1c9534364861fa7c63198c103 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d45437a1c9534364861fa7c63198c103 2024-12-12T19:33:24,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032064190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032064191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d45437a1c9534364861fa7c63198c103, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T19:33:24,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for f61d5e1e16d46799d7435009ba841107 in 724ms, sequenceid=15, compaction requested=false 2024-12-12T19:33:24,224 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T19:33:24,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:24,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T19:33:24,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:24,255 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T19:33:24,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:24,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:24,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:24,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:24,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:24,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:24,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/81ab8ffd524a42a49e24fb1979d33378 is 50, key is test_row_0/A:col10/1734032003545/Put/seqid=0 2024-12-12T19:33:24,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742020_1196 (size=12001) 2024-12-12T19:33:24,286 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/81ab8ffd524a42a49e24fb1979d33378 2024-12-12T19:33:24,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/63af6d0c4435429ea3234eb2d66b80af is 50, key is test_row_0/B:col10/1734032003545/Put/seqid=0 2024-12-12T19:33:24,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742021_1197 (size=12001) 2024-12-12T19:33:24,324 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/63af6d0c4435429ea3234eb2d66b80af 2024-12-12T19:33:24,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/4c78d8ff4f4a43119c143afa2f882113 is 50, key is test_row_0/C:col10/1734032003545/Put/seqid=0 2024-12-12T19:33:24,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742022_1198 (size=12001) 2024-12-12T19:33:24,367 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/4c78d8ff4f4a43119c143afa2f882113 2024-12-12T19:33:24,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/81ab8ffd524a42a49e24fb1979d33378 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/81ab8ffd524a42a49e24fb1979d33378 2024-12-12T19:33:24,385 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/81ab8ffd524a42a49e24fb1979d33378, entries=150, sequenceid=37, filesize=11.7 K 2024-12-12T19:33:24,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/63af6d0c4435429ea3234eb2d66b80af as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63af6d0c4435429ea3234eb2d66b80af 2024-12-12T19:33:24,408 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63af6d0c4435429ea3234eb2d66b80af, entries=150, sequenceid=37, filesize=11.7 K 2024-12-12T19:33:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/4c78d8ff4f4a43119c143afa2f882113 as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4c78d8ff4f4a43119c143afa2f882113 2024-12-12T19:33:24,429 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4c78d8ff4f4a43119c143afa2f882113, entries=150, sequenceid=37, filesize=11.7 K 2024-12-12T19:33:24,435 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for f61d5e1e16d46799d7435009ba841107 in 180ms, sequenceid=37, compaction requested=false 2024-12-12T19:33:24,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:24,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-12T19:33:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-12T19:33:24,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-12T19:33:24,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 966 msec 2024-12-12T19:33:24,458 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 982 msec 2024-12-12T19:33:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T19:33:24,593 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-12T19:33:24,605 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-12T19:33:24,608 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:24,608 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:24,609 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:24,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:33:24,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:24,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:24,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:24,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:24,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:24,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:24,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c809b330744b42e49a4452cd5d6b4523 is 50, key is test_row_0/A:col10/1734032004693/Put/seqid=0 2024-12-12T19:33:24,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742023_1199 (size=12001) 2024-12-12T19:33:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032064728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032064729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032064728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032064730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032064733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,762 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:24,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:24,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:24,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:24,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:24,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032064832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032064834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032064834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032064835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032064835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:24,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:24,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:24,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:24,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:24,916 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:24,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:24,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032065035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032065039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032065039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032065043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032065043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,070 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:25,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:25,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c809b330744b42e49a4452cd5d6b4523 2024-12-12T19:33:25,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/2d4dd810efd240d192711b32ba86fba4 is 50, key is test_row_0/B:col10/1734032004693/Put/seqid=0 2024-12-12T19:33:25,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742024_1200 (size=12001) 2024-12-12T19:33:25,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/2d4dd810efd240d192711b32ba86fba4 2024-12-12T19:33:25,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:25,227 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:25,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:25,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:25,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:25,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/dcabf9a5ac2343d084247560bbc7c912 is 50, key is test_row_0/C:col10/1734032004693/Put/seqid=0 2024-12-12T19:33:25,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742025_1201 (size=12001) 2024-12-12T19:33:25,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/dcabf9a5ac2343d084247560bbc7c912 2024-12-12T19:33:25,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c809b330744b42e49a4452cd5d6b4523 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c809b330744b42e49a4452cd5d6b4523 2024-12-12T19:33:25,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c809b330744b42e49a4452cd5d6b4523, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:33:25,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/2d4dd810efd240d192711b32ba86fba4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2d4dd810efd240d192711b32ba86fba4 2024-12-12T19:33:25,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2d4dd810efd240d192711b32ba86fba4, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:33:25,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/dcabf9a5ac2343d084247560bbc7c912 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/dcabf9a5ac2343d084247560bbc7c912 2024-12-12T19:33:25,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/dcabf9a5ac2343d084247560bbc7c912, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:33:25,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for f61d5e1e16d46799d7435009ba841107 in 625ms, sequenceid=52, compaction 
requested=true 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:25,325 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:25,325 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:25,327 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:25,327 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:25,327 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:25,327 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/b1c834c713504768ae3713fffc6543c2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63af6d0c4435429ea3234eb2d66b80af, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2d4dd810efd240d192711b32ba86fba4] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.2 K 2024-12-12T19:33:25,327 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:25,327 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:25,328 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,328 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/2ae32f170e304bce8c6aabed2fead8b2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/81ab8ffd524a42a49e24fb1979d33378, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c809b330744b42e49a4452cd5d6b4523] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.2 K 2024-12-12T19:33:25,329 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b1c834c713504768ae3713fffc6543c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734032003481 2024-12-12T19:33:25,329 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ae32f170e304bce8c6aabed2fead8b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734032003481 2024-12-12T19:33:25,329 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 63af6d0c4435429ea3234eb2d66b80af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734032003545 2024-12-12T19:33:25,330 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81ab8ffd524a42a49e24fb1979d33378, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734032003545 2024-12-12T19:33:25,331 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 2d4dd810efd240d192711b32ba86fba4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032004693 2024-12-12T19:33:25,331 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c809b330744b42e49a4452cd5d6b4523, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032004693 2024-12-12T19:33:25,342 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#159 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:25,342 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a941d2773aef4ff299ec62d95b007e29 is 50, key is test_row_0/B:col10/1734032004693/Put/seqid=0 2024-12-12T19:33:25,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T19:33:25,344 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#160 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:25,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:25,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:25,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:25,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:25,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:25,345 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/71c94fdf8f7840a7b3a7604b4d794198 is 50, key is test_row_0/A:col10/1734032004693/Put/seqid=0 2024-12-12T19:33:25,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:25,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:25,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032065354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032065354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032065355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032065358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032065357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/818600e51c4444e78023d1d02ea6dcf0 is 50, key is test_row_0/A:col10/1734032005342/Put/seqid=0 2024-12-12T19:33:25,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742027_1203 (size=12104) 2024-12-12T19:33:25,380 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/71c94fdf8f7840a7b3a7604b4d794198 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/71c94fdf8f7840a7b3a7604b4d794198 2024-12-12T19:33:25,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742026_1202 (size=12104) 2024-12-12T19:33:25,391 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a941d2773aef4ff299ec62d95b007e29 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a941d2773aef4ff299ec62d95b007e29 2024-12-12T19:33:25,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:25,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:25,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:25,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742028_1204 (size=12001) 2024-12-12T19:33:25,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/818600e51c4444e78023d1d02ea6dcf0 2024-12-12T19:33:25,416 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into a941d2773aef4ff299ec62d95b007e29(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:25,416 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:25,416 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032005325; duration=0sec 2024-12-12T19:33:25,416 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 71c94fdf8f7840a7b3a7604b4d794198(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:25,416 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:25,417 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032005325; duration=0sec 2024-12-12T19:33:25,417 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:25,417 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:25,417 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:25,417 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:25,417 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:25,429 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:25,429 DEBUG 
[RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:25,429 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,429 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d45437a1c9534364861fa7c63198c103, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4c78d8ff4f4a43119c143afa2f882113, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/dcabf9a5ac2343d084247560bbc7c912] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.2 K 2024-12-12T19:33:25,435 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d45437a1c9534364861fa7c63198c103, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734032003481 2024-12-12T19:33:25,440 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c78d8ff4f4a43119c143afa2f882113, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734032003545 2024-12-12T19:33:25,441 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting dcabf9a5ac2343d084247560bbc7c912, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032004693 2024-12-12T19:33:25,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/36a2d9ec9ecc4e8790973e3f7166ecaf is 50, key is test_row_0/B:col10/1734032005342/Put/seqid=0 2024-12-12T19:33:25,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032065459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032065460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032065465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032065465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032065463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,475 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#163 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:25,476 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/47f042d63e094abbbb71f22a06d3cc0a is 50, key is test_row_0/C:col10/1734032004693/Put/seqid=0 2024-12-12T19:33:25,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742029_1205 (size=12001) 2024-12-12T19:33:25,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742030_1206 (size=12104) 2024-12-12T19:33:25,551 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:25,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:25,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:25,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:25,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032065664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032065671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032065671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032065673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032065679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:25,861 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:25,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:25,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:25,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:25,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/36a2d9ec9ecc4e8790973e3f7166ecaf 2024-12-12T19:33:25,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/fe7463bc754748b68f404f135126b77f is 50, key is test_row_0/C:col10/1734032005342/Put/seqid=0 2024-12-12T19:33:25,921 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/47f042d63e094abbbb71f22a06d3cc0a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/47f042d63e094abbbb71f22a06d3cc0a 2024-12-12T19:33:25,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742031_1207 (size=12001) 2024-12-12T19:33:25,935 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 47f042d63e094abbbb71f22a06d3cc0a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:25,936 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:25,936 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032005325; duration=0sec 2024-12-12T19:33:25,936 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:25,936 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:25,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032065975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032065983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032065983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032065991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:25,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032065992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:26,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:26,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:26,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:26,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:26,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:26,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/fe7463bc754748b68f404f135126b77f 2024-12-12T19:33:26,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:26,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:26,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/818600e51c4444e78023d1d02ea6dcf0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/818600e51c4444e78023d1d02ea6dcf0 2024-12-12T19:33:26,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:26,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/818600e51c4444e78023d1d02ea6dcf0, entries=150, sequenceid=74, filesize=11.7 K 2024-12-12T19:33:26,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/36a2d9ec9ecc4e8790973e3f7166ecaf as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/36a2d9ec9ecc4e8790973e3f7166ecaf 2024-12-12T19:33:26,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/36a2d9ec9ecc4e8790973e3f7166ecaf, entries=150, sequenceid=74, filesize=11.7 K 2024-12-12T19:33:26,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/fe7463bc754748b68f404f135126b77f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/fe7463bc754748b68f404f135126b77f 2024-12-12T19:33:26,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/fe7463bc754748b68f404f135126b77f, entries=150, sequenceid=74, filesize=11.7 K 2024-12-12T19:33:26,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for f61d5e1e16d46799d7435009ba841107 in 1098ms, sequenceid=74, compaction requested=false 2024-12-12T19:33:26,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:26,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T19:33:26,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:26,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:26,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:26,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:26,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:26,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:26,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-12T19:33:26,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/8f94e3f0b30843678b76e862cc123a61 is 50, key is test_row_0/A:col10/1734032006496/Put/seqid=0 2024-12-12T19:33:26,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:26,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:26,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:26,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742032_1208 (size=12001) 2024-12-12T19:33:26,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032066550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032066559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032066564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032066564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032066568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032066670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032066675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032066679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032066683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032066687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,692 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:26,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:26,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:26,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:26,850 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:26,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:26,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:26,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:26,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032066871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032066879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032066889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032066893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:26,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032066894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:26,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/8f94e3f0b30843678b76e862cc123a61 2024-12-12T19:33:26,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/2028c39bf29c4bf9afee8bb3cc682497 is 50, key is test_row_0/B:col10/1734032006496/Put/seqid=0 2024-12-12T19:33:27,014 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:27,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:27,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742033_1209 (size=12001) 2024-12-12T19:33:27,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:27,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:27,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032067177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032067185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032067193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032067197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032067203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,327 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:27,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:27,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,327 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/2028c39bf29c4bf9afee8bb3cc682497 2024-12-12T19:33:27,483 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/514835e4c7ed44f8b024994eabe3bb5d is 50, key is test_row_0/C:col10/1734032006496/Put/seqid=0 2024-12-12T19:33:27,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:27,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:27,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:27,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742034_1210 (size=12001) 2024-12-12T19:33:27,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/514835e4c7ed44f8b024994eabe3bb5d 2024-12-12T19:33:27,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/8f94e3f0b30843678b76e862cc123a61 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/8f94e3f0b30843678b76e862cc123a61 2024-12-12T19:33:27,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/8f94e3f0b30843678b76e862cc123a61, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T19:33:27,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/2028c39bf29c4bf9afee8bb3cc682497 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2028c39bf29c4bf9afee8bb3cc682497 2024-12-12T19:33:27,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2028c39bf29c4bf9afee8bb3cc682497, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T19:33:27,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/514835e4c7ed44f8b024994eabe3bb5d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/514835e4c7ed44f8b024994eabe3bb5d 2024-12-12T19:33:27,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/514835e4c7ed44f8b024994eabe3bb5d, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T19:33:27,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for f61d5e1e16d46799d7435009ba841107 in 1076ms, sequenceid=95, compaction requested=true 2024-12-12T19:33:27,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:27,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:27,575 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:27,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:27,575 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:27,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:27,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:27,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:27,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:27,578 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:27,578 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor 
compaction (all files) 2024-12-12T19:33:27,579 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,579 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/71c94fdf8f7840a7b3a7604b4d794198, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/818600e51c4444e78023d1d02ea6dcf0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/8f94e3f0b30843678b76e862cc123a61] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.3 K 2024-12-12T19:33:27,579 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:27,579 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:27,579 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:27,579 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a941d2773aef4ff299ec62d95b007e29, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/36a2d9ec9ecc4e8790973e3f7166ecaf, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2028c39bf29c4bf9afee8bb3cc682497] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.3 K 2024-12-12T19:33:27,580 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a941d2773aef4ff299ec62d95b007e29, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032004693 2024-12-12T19:33:27,580 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71c94fdf8f7840a7b3a7604b4d794198, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032004693 2024-12-12T19:33:27,581 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 818600e51c4444e78023d1d02ea6dcf0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734032004726 2024-12-12T19:33:27,581 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 36a2d9ec9ecc4e8790973e3f7166ecaf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734032004726 2024-12-12T19:33:27,582 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f94e3f0b30843678b76e862cc123a61, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734032005356 2024-12-12T19:33:27,582 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 2028c39bf29c4bf9afee8bb3cc682497, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734032005356 2024-12-12T19:33:27,603 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#168 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:27,603 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/cb90b828812d4be6b7093ae1f2a2a98f is 50, key is test_row_0/A:col10/1734032006496/Put/seqid=0 2024-12-12T19:33:27,610 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#169 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:27,611 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/179263e6543d44fc9fa34a48a6681846 is 50, key is test_row_0/B:col10/1734032006496/Put/seqid=0 2024-12-12T19:33:27,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742035_1211 (size=12207) 2024-12-12T19:33:27,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742036_1212 (size=12207) 2024-12-12T19:33:27,641 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T19:33:27,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:27,650 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T19:33:27,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:27,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:27,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:27,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:27,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:27,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:27,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a9d70b10b3c0419cbf71b49af4d978d4 is 50, key is test_row_0/A:col10/1734032006558/Put/seqid=0 2024-12-12T19:33:27,665 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/179263e6543d44fc9fa34a48a6681846 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/179263e6543d44fc9fa34a48a6681846 2024-12-12T19:33:27,667 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/cb90b828812d4be6b7093ae1f2a2a98f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/cb90b828812d4be6b7093ae1f2a2a98f 2024-12-12T19:33:27,677 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into 179263e6543d44fc9fa34a48a6681846(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:27,677 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:27,677 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032007575; duration=0sec 2024-12-12T19:33:27,678 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:27,678 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:27,678 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:27,680 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:27,680 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:27,680 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:27,680 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/47f042d63e094abbbb71f22a06d3cc0a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/fe7463bc754748b68f404f135126b77f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/514835e4c7ed44f8b024994eabe3bb5d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.3 K 2024-12-12T19:33:27,681 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 47f042d63e094abbbb71f22a06d3cc0a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032004693 2024-12-12T19:33:27,681 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting fe7463bc754748b68f404f135126b77f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734032004726 2024-12-12T19:33:27,682 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 514835e4c7ed44f8b024994eabe3bb5d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734032005356 2024-12-12T19:33:27,683 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into cb90b828812d4be6b7093ae1f2a2a98f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:27,683 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:27,683 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032007575; duration=0sec 2024-12-12T19:33:27,683 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:27,683 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:27,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
as already flushing 2024-12-12T19:33:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:27,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742037_1213 (size=12001) 2024-12-12T19:33:27,703 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a9d70b10b3c0419cbf71b49af4d978d4 2024-12-12T19:33:27,713 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:27,713 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/51f9a9d146584eb6bdead0fc65cce2a6 is 50, key is test_row_0/C:col10/1734032006496/Put/seqid=0 2024-12-12T19:33:27,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032067712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032067716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032067721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032067721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032067724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/e5fddaa3e8c94a9b83c5fd7edea86016 is 50, key is test_row_0/B:col10/1734032006558/Put/seqid=0 2024-12-12T19:33:27,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742038_1214 (size=12207) 2024-12-12T19:33:27,750 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/51f9a9d146584eb6bdead0fc65cce2a6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/51f9a9d146584eb6bdead0fc65cce2a6 2024-12-12T19:33:27,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742039_1215 (size=12001) 2024-12-12T19:33:27,759 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 51f9a9d146584eb6bdead0fc65cce2a6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:27,759 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:27,759 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032007576; duration=0sec 2024-12-12T19:33:27,760 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:27,760 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:27,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032067822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032067832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032067832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:27,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:27,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032067832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032068025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032068034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032068035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032068037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,167 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/e5fddaa3e8c94a9b83c5fd7edea86016 2024-12-12T19:33:28,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/a8e3670f80344ca78d9debc992832a60 is 50, key is test_row_0/C:col10/1734032006558/Put/seqid=0 2024-12-12T19:33:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742040_1216 (size=12001) 2024-12-12T19:33:28,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032068329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032068338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032068340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032068339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,642 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/a8e3670f80344ca78d9debc992832a60 2024-12-12T19:33:28,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a9d70b10b3c0419cbf71b49af4d978d4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a9d70b10b3c0419cbf71b49af4d978d4 2024-12-12T19:33:28,655 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a9d70b10b3c0419cbf71b49af4d978d4, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T19:33:28,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/e5fddaa3e8c94a9b83c5fd7edea86016 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e5fddaa3e8c94a9b83c5fd7edea86016 2024-12-12T19:33:28,663 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e5fddaa3e8c94a9b83c5fd7edea86016, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T19:33:28,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/a8e3670f80344ca78d9debc992832a60 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/a8e3670f80344ca78d9debc992832a60 2024-12-12T19:33:28,677 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/a8e3670f80344ca78d9debc992832a60, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T19:33:28,683 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=107.34 KB/109920 for f61d5e1e16d46799d7435009ba841107 in 1033ms, sequenceid=114, compaction requested=false 2024-12-12T19:33:28,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:28,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:28,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-12T19:33:28,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-12T19:33:28,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-12T19:33:28,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0800 sec 2024-12-12T19:33:28,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 4.0860 sec 2024-12-12T19:33:28,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T19:33:28,719 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-12T19:33:28,728 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:28,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-12T19:33:28,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T19:33:28,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush 
requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:28,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T19:33:28,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:28,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:28,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:28,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:28,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:28,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:28,735 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:28,736 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:28,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:28,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/e83c7177b6334566ab6a76565c57cd7f is 50, key is test_row_0/A:col10/1734032007710/Put/seqid=0 2024-12-12T19:33:28,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742041_1217 (size=12151) 2024-12-12T19:33:28,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032068781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/e83c7177b6334566ab6a76565c57cd7f 2024-12-12T19:33:28,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/52a0849f7d334c09b9768f9c069a308f is 50, key is test_row_0/B:col10/1734032007710/Put/seqid=0 2024-12-12T19:33:28,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T19:33:28,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032068835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742042_1218 (size=12151) 2024-12-12T19:33:28,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032068841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/52a0849f7d334c09b9768f9c069a308f 2024-12-12T19:33:28,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032068850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032068850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/783c8db842a54bf68704e9ccf42e57b9 is 50, key is test_row_0/C:col10/1734032007710/Put/seqid=0 2024-12-12T19:33:28,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742043_1219 (size=12151) 2024-12-12T19:33:28,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/783c8db842a54bf68704e9ccf42e57b9 2024-12-12T19:33:28,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:28,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032068883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:28,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T19:33:28,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:28,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:28,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:28,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:28,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:28,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/e83c7177b6334566ab6a76565c57cd7f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/e83c7177b6334566ab6a76565c57cd7f 2024-12-12T19:33:28,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/e83c7177b6334566ab6a76565c57cd7f, entries=150, sequenceid=137, filesize=11.9 K 2024-12-12T19:33:28,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/52a0849f7d334c09b9768f9c069a308f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/52a0849f7d334c09b9768f9c069a308f 2024-12-12T19:33:28,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/52a0849f7d334c09b9768f9c069a308f, entries=150, sequenceid=137, filesize=11.9 K 2024-12-12T19:33:28,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/783c8db842a54bf68704e9ccf42e57b9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/783c8db842a54bf68704e9ccf42e57b9 2024-12-12T19:33:28,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/783c8db842a54bf68704e9ccf42e57b9, entries=150, sequenceid=137, filesize=11.9 K 2024-12-12T19:33:28,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for f61d5e1e16d46799d7435009ba841107 in 206ms, sequenceid=137, compaction requested=true 2024-12-12T19:33:28,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:28,938 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:28,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:28,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:28,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:28,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:28,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:28,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:33:28,939 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:28,939 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:28,939 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:28,939 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:28,939 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/cb90b828812d4be6b7093ae1f2a2a98f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a9d70b10b3c0419cbf71b49af4d978d4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/e83c7177b6334566ab6a76565c57cd7f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.5 K 2024-12-12T19:33:28,940 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting cb90b828812d4be6b7093ae1f2a2a98f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734032005356 2024-12-12T19:33:28,941 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a9d70b10b3c0419cbf71b49af4d978d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734032006547 2024-12-12T19:33:28,941 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e83c7177b6334566ab6a76565c57cd7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734032007710 2024-12-12T19:33:28,951 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:28,951 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:28,951 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:28,952 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/179263e6543d44fc9fa34a48a6681846, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e5fddaa3e8c94a9b83c5fd7edea86016, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/52a0849f7d334c09b9768f9c069a308f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.5 K 2024-12-12T19:33:28,955 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 179263e6543d44fc9fa34a48a6681846, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734032005356 2024-12-12T19:33:28,959 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5fddaa3e8c94a9b83c5fd7edea86016, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734032006547 2024-12-12T19:33:28,963 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52a0849f7d334c09b9768f9c069a308f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734032007710 2024-12-12T19:33:28,964 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#177 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:28,965 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/840f398f7da846f59a18fd24ff422597 is 50, key is test_row_0/A:col10/1734032007710/Put/seqid=0 2024-12-12T19:33:29,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742044_1220 (size=12459) 2024-12-12T19:33:29,017 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:29,017 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/bbce2a69df5141e49b270f693ac03087 is 50, key is test_row_0/B:col10/1734032007710/Put/seqid=0 2024-12-12T19:33:29,029 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/840f398f7da846f59a18fd24ff422597 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/840f398f7da846f59a18fd24ff422597 2024-12-12T19:33:29,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T19:33:29,047 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T19:33:29,049 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 840f398f7da846f59a18fd24ff422597(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:29,049 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:29,049 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032008938; duration=0sec 2024-12-12T19:33:29,049 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:29,049 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:29,049 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:29,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:29,051 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T19:33:29,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:29,052 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:29,052 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:29,052 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:29,052 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/51f9a9d146584eb6bdead0fc65cce2a6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/a8e3670f80344ca78d9debc992832a60, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/783c8db842a54bf68704e9ccf42e57b9] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.5 K 2024-12-12T19:33:29,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742045_1221 (size=12459) 2024-12-12T19:33:29,060 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 51f9a9d146584eb6bdead0fc65cce2a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734032005356 2024-12-12T19:33:29,068 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a8e3670f80344ca78d9debc992832a60, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734032006547 2024-12-12T19:33:29,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/3be2ac6efde840c3b160fd5a6b26b5a4 is 50, key is test_row_0/A:col10/1734032008752/Put/seqid=0 2024-12-12T19:33:29,074 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 783c8db842a54bf68704e9ccf42e57b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734032007710 2024-12-12T19:33:29,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:29,125 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:29,126 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/29d4b5ebc3b645beb8d5be933b304ae2 is 50, key is test_row_0/C:col10/1734032007710/Put/seqid=0 2024-12-12T19:33:29,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742046_1222 (size=12151) 2024-12-12T19:33:29,139 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/3be2ac6efde840c3b160fd5a6b26b5a4 2024-12-12T19:33:29,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/4ac00b73ad2249f1ae1999df2ed18d30 is 50, key is test_row_0/B:col10/1734032008752/Put/seqid=0 2024-12-12T19:33:29,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742047_1223 (size=12459) 2024-12-12T19:33:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742048_1224 (size=12151) 2024-12-12T19:33:29,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032069229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T19:33:29,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032069335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,473 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/bbce2a69df5141e49b270f693ac03087 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bbce2a69df5141e49b270f693ac03087 2024-12-12T19:33:29,479 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into bbce2a69df5141e49b270f693ac03087(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:29,479 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:29,479 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032008938; duration=0sec 2024-12-12T19:33:29,480 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:29,480 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:29,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032069548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,638 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/29d4b5ebc3b645beb8d5be933b304ae2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/29d4b5ebc3b645beb8d5be933b304ae2 2024-12-12T19:33:29,644 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/4ac00b73ad2249f1ae1999df2ed18d30 2024-12-12T19:33:29,675 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 29d4b5ebc3b645beb8d5be933b304ae2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:29,676 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:29,676 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032008938; duration=0sec 2024-12-12T19:33:29,676 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:29,676 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/b5bc0dd3c40d4112ad488641e05953e9 is 50, key is test_row_0/C:col10/1734032008752/Put/seqid=0 2024-12-12T19:33:29,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742049_1225 (size=12151) 2024-12-12T19:33:29,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T19:33:29,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032069846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032069853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032069853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032069863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:29,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:29,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032069867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:30,132 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/b5bc0dd3c40d4112ad488641e05953e9 2024-12-12T19:33:30,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/3be2ac6efde840c3b160fd5a6b26b5a4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/3be2ac6efde840c3b160fd5a6b26b5a4 2024-12-12T19:33:30,140 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/3be2ac6efde840c3b160fd5a6b26b5a4, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T19:33:30,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/4ac00b73ad2249f1ae1999df2ed18d30 as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/4ac00b73ad2249f1ae1999df2ed18d30 2024-12-12T19:33:30,145 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/4ac00b73ad2249f1ae1999df2ed18d30, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T19:33:30,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/b5bc0dd3c40d4112ad488641e05953e9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/b5bc0dd3c40d4112ad488641e05953e9 2024-12-12T19:33:30,151 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/b5bc0dd3c40d4112ad488641e05953e9, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T19:33:30,152 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for f61d5e1e16d46799d7435009ba841107 in 1101ms, sequenceid=154, compaction requested=false 2024-12-12T19:33:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-12T19:33:30,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-12T19:33:30,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-12T19:33:30,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4170 sec 2024-12-12T19:33:30,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.4270 sec 2024-12-12T19:33:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:30,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T19:33:30,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:30,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:30,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:30,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:30,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:30,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:30,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/35291f7d3cc54f04b26a78eb6d5e6574 is 50, key is test_row_0/A:col10/1734032009198/Put/seqid=0 2024-12-12T19:33:30,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742050_1226 (size=12151) 2024-12-12T19:33:30,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032070396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:30,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:30,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032070499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:30,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:30,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032070702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:30,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/35291f7d3cc54f04b26a78eb6d5e6574 2024-12-12T19:33:30,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/53eab477efd241a6933f3b6de618d667 is 50, key is test_row_0/B:col10/1734032009198/Put/seqid=0 2024-12-12T19:33:30,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742051_1227 (size=12151) 2024-12-12T19:33:30,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T19:33:30,836 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-12T19:33:30,840 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:30,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-12T19:33:30,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T19:33:30,843 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:30,846 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:30,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:30,872 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T19:33:30,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T19:33:30,999 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T19:33:31,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:31,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:31,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032071009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T19:33:31,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T19:33:31,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:31,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:31,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/53eab477efd241a6933f3b6de618d667 2024-12-12T19:33:31,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/74deffd12d3b4f0eaa4c9bb98a76dabe is 50, key is test_row_0/C:col10/1734032009198/Put/seqid=0 2024-12-12T19:33:31,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742052_1228 (size=12151) 2024-12-12T19:33:31,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T19:33:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
as already flushing 2024-12-12T19:33:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T19:33:31,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T19:33:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032071520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,642 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T19:33:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:31,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:31,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/74deffd12d3b4f0eaa4c9bb98a76dabe 2024-12-12T19:33:31,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/35291f7d3cc54f04b26a78eb6d5e6574 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/35291f7d3cc54f04b26a78eb6d5e6574 2024-12-12T19:33:31,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/35291f7d3cc54f04b26a78eb6d5e6574, entries=150, sequenceid=177, filesize=11.9 K 2024-12-12T19:33:31,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/53eab477efd241a6933f3b6de618d667 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/53eab477efd241a6933f3b6de618d667 2024-12-12T19:33:31,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/53eab477efd241a6933f3b6de618d667, entries=150, sequenceid=177, filesize=11.9 K 2024-12-12T19:33:31,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/74deffd12d3b4f0eaa4c9bb98a76dabe as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/74deffd12d3b4f0eaa4c9bb98a76dabe 2024-12-12T19:33:31,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/74deffd12d3b4f0eaa4c9bb98a76dabe, entries=150, sequenceid=177, filesize=11.9 K 2024-12-12T19:33:31,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for f61d5e1e16d46799d7435009ba841107 in 1387ms, sequenceid=177, compaction requested=true 2024-12-12T19:33:31,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:31,749 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:31,749 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:31,750 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:31,750 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bbce2a69df5141e49b270f693ac03087, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/4ac00b73ad2249f1ae1999df2ed18d30, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/53eab477efd241a6933f3b6de618d667] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.9 K 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:31,750 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:31,750 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/840f398f7da846f59a18fd24ff422597, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/3be2ac6efde840c3b160fd5a6b26b5a4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/35291f7d3cc54f04b26a78eb6d5e6574] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.9 K 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bbce2a69df5141e49b270f693ac03087, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734032007710 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 840f398f7da846f59a18fd24ff422597, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734032007710 2024-12-12T19:33:31,750 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ac00b73ad2249f1ae1999df2ed18d30, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734032008752 2024-12-12T19:33:31,751 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3be2ac6efde840c3b160fd5a6b26b5a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734032008752 2024-12-12T19:33:31,751 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 53eab477efd241a6933f3b6de618d667, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734032009198 2024-12-12T19:33:31,751 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35291f7d3cc54f04b26a78eb6d5e6574, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734032009198 2024-12-12T19:33:31,757 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:31,758 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/1b972d13f5974c0f989848b34ded447b is 50, key is test_row_0/B:col10/1734032009198/Put/seqid=0 2024-12-12T19:33:31,759 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#187 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:31,760 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/517d6f3217634036991583e3de4a126e is 50, key is test_row_0/A:col10/1734032009198/Put/seqid=0 2024-12-12T19:33:31,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742053_1229 (size=12561) 2024-12-12T19:33:31,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742054_1230 (size=12561) 2024-12-12T19:33:31,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T19:33:31,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:31,795 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T19:33:31,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:31,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:31,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:31,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:31,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:31,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:31,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/ee4a0658e31744cf9f976bb4d3a17748 is 50, key is test_row_0/A:col10/1734032010394/Put/seqid=0 2024-12-12T19:33:31,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742055_1231 (size=9757) 2024-12-12T19:33:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:31,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:31,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032071888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032071889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032071890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032071890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T19:33:31,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032071991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032071994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032071994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:31,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032071994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,188 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/517d6f3217634036991583e3de4a126e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/517d6f3217634036991583e3de4a126e 2024-12-12T19:33:32,188 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/1b972d13f5974c0f989848b34ded447b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/1b972d13f5974c0f989848b34ded447b 2024-12-12T19:33:32,193 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into 1b972d13f5974c0f989848b34ded447b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:32,193 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 517d6f3217634036991583e3de4a126e(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:32,193 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032011749; duration=0sec 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:32,193 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032011748; duration=0sec 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:32,193 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:32,194 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:32,194 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:32,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,194 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:32,195 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/29d4b5ebc3b645beb8d5be933b304ae2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/b5bc0dd3c40d4112ad488641e05953e9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/74deffd12d3b4f0eaa4c9bb98a76dabe] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=35.9 K 2024-12-12T19:33:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032072194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,195 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 29d4b5ebc3b645beb8d5be933b304ae2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734032007710 2024-12-12T19:33:32,195 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b5bc0dd3c40d4112ad488641e05953e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734032008752 2024-12-12T19:33:32,196 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 74deffd12d3b4f0eaa4c9bb98a76dabe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734032009198 2024-12-12T19:33:32,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032072196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032072200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,202 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/ee4a0658e31744cf9f976bb4d3a17748 2024-12-12T19:33:32,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032072201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/db5b94e077a340eba52ee3571f17d741 is 50, key is test_row_0/B:col10/1734032010394/Put/seqid=0 2024-12-12T19:33:32,213 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#190 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:32,214 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9972ff0eff4d45879c7585128736b556 is 50, key is test_row_0/C:col10/1734032009198/Put/seqid=0 2024-12-12T19:33:32,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742056_1232 (size=9757) 2024-12-12T19:33:32,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742057_1233 (size=12561) 2024-12-12T19:33:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032072504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032072504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032072504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032072504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:32,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032072530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:32,618 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/db5b94e077a340eba52ee3571f17d741 2024-12-12T19:33:32,642 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9972ff0eff4d45879c7585128736b556 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9972ff0eff4d45879c7585128736b556 2024-12-12T19:33:32,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/3422996b27ec4a648e40c9649b774b6b is 50, key is test_row_0/C:col10/1734032010394/Put/seqid=0 2024-12-12T19:33:32,649 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 9972ff0eff4d45879c7585128736b556(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:32,649 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:32,649 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032011749; duration=0sec 2024-12-12T19:33:32,649 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:32,649 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:32,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742058_1234 (size=9757) 2024-12-12T19:33:32,683 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/3422996b27ec4a648e40c9649b774b6b 2024-12-12T19:33:32,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/ee4a0658e31744cf9f976bb4d3a17748 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/ee4a0658e31744cf9f976bb4d3a17748 2024-12-12T19:33:32,764 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/ee4a0658e31744cf9f976bb4d3a17748, entries=100, sequenceid=192, filesize=9.5 K 2024-12-12T19:33:32,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/db5b94e077a340eba52ee3571f17d741 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/db5b94e077a340eba52ee3571f17d741 2024-12-12T19:33:32,776 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/db5b94e077a340eba52ee3571f17d741, entries=100, sequenceid=192, filesize=9.5 K 2024-12-12T19:33:32,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/3422996b27ec4a648e40c9649b774b6b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/3422996b27ec4a648e40c9649b774b6b 2024-12-12T19:33:32,782 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/3422996b27ec4a648e40c9649b774b6b, entries=100, sequenceid=192, filesize=9.5 K 2024-12-12T19:33:32,783 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for f61d5e1e16d46799d7435009ba841107 in 989ms, sequenceid=192, compaction requested=false 2024-12-12T19:33:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-12T19:33:32,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-12T19:33:32,789 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-12T19:33:32,789 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9410 sec 2024-12-12T19:33:32,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.9490 sec 2024-12-12T19:33:32,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T19:33:32,949 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-12T19:33:32,952 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:32,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-12T19:33:32,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T19:33:32,956 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:32,957 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:32,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:33,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:33,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:33:33,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:33,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:33,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:33,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:33,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:33,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:33,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/5dd2eeb927a9445b9ea1b67d032487ba is 50, key is test_row_0/A:col10/1734032013014/Put/seqid=0 2024-12-12T19:33:33,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032073040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032073042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032073042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032073044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T19:33:33,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742059_1235 (size=12151) 2024-12-12T19:33:33,115 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T19:33:33,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:33,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032073145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032073145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032073146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032073148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T19:33:33,271 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T19:33:33,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:33,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032073352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032073352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032073352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032073355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,430 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T19:33:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:33,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
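[Annotation] The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting mutations once the region's memstore passes its blocking size (512.0 K in this run); the exception is retriable, and the stock HBase client normally backs off and retries it on its own. A minimal client-side sketch of an explicit retry loop, assuming the standard hbase-client API and reusing the table/row/family names visible in the log (not code from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Make a single attempt per put so server-side pushback reaches this loop
            // instead of being absorbed by the client's own retry machinery.
            conf.setInt("hbase.client.retries.number", 1);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);   // rejected while the region's memstore is over its blocking size
                        break;
                    } catch (RegionTooBusyException | RetriesExhaustedException busy) {
                        if (attempt >= 10) {
                            throw busy;                  // give up after a bounded number of attempts
                        }
                        Thread.sleep(backoffMs);         // wait for the in-flight flush to drain the memstore
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }

Depending on client settings the server's exception may surface directly or wrapped in RetriesExhaustedException, so the sketch catches both.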
2024-12-12T19:33:33,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/5dd2eeb927a9445b9ea1b67d032487ba 2024-12-12T19:33:33,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/0d2052c28fe44b5d97ad2bdd4818697f is 50, key is test_row_0/B:col10/1734032013014/Put/seqid=0 2024-12-12T19:33:33,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742060_1236 (size=12151) 2024-12-12T19:33:33,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/0d2052c28fe44b5d97ad2bdd4818697f 2024-12-12T19:33:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T19:33:33,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/d8224b88f7524cf19a8af5b07e5d3132 is 50, key is test_row_0/C:col10/1734032013014/Put/seqid=0 2024-12-12T19:33:33,583 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T19:33:33,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742061_1237 (size=12151) 2024-12-12T19:33:33,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:33,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:33,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:33,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/d8224b88f7524cf19a8af5b07e5d3132 2024-12-12T19:33:33,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/5dd2eeb927a9445b9ea1b67d032487ba as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/5dd2eeb927a9445b9ea1b67d032487ba 2024-12-12T19:33:33,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032073657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032073659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/5dd2eeb927a9445b9ea1b67d032487ba, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T19:33:33,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032073663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/0d2052c28fe44b5d97ad2bdd4818697f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/0d2052c28fe44b5d97ad2bdd4818697f 2024-12-12T19:33:33,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:33,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032073664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/0d2052c28fe44b5d97ad2bdd4818697f, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T19:33:33,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/d8224b88f7524cf19a8af5b07e5d3132 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d8224b88f7524cf19a8af5b07e5d3132 2024-12-12T19:33:33,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d8224b88f7524cf19a8af5b07e5d3132, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T19:33:33,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f61d5e1e16d46799d7435009ba841107 in 710ms, sequenceid=219, compaction requested=true 2024-12-12T19:33:33,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:33,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:33,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:33,729 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:33,729 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:33,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 
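[Annotation] The "Over memstore limit=512.0 K" figure that recurs above is the per-region blocking size checked by HRegion.checkResources before each write: hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The test evidently runs with a very small flush size so the limit is reached quickly under concurrent writers. A hedged sketch of one configuration that would produce a 512 KB blocking size (the exact values used by this test run are an assumption, not taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB (the default is 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block new writes once the memstore reaches flush.size * multiplier,
            // i.e. 128 KB * 4 = 512 KB -- matching the "Over memstore limit=512.0 K" entries.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("blocking size = " + blocking + " bytes");
        }
    }

Writes resume once the flush visible later in the log brings the memstore back under this threshold.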
2024-12-12T19:33:33,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:33,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:33,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:33,731 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:33,731 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:33,731 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,731 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/517d6f3217634036991583e3de4a126e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/ee4a0658e31744cf9f976bb4d3a17748, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/5dd2eeb927a9445b9ea1b67d032487ba] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=33.7 K 2024-12-12T19:33:33,731 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:33,731 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:33,731 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:33,731 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/1b972d13f5974c0f989848b34ded447b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/db5b94e077a340eba52ee3571f17d741, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/0d2052c28fe44b5d97ad2bdd4818697f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=33.7 K 2024-12-12T19:33:33,732 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 517d6f3217634036991583e3de4a126e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734032009198 2024-12-12T19:33:33,733 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b972d13f5974c0f989848b34ded447b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734032009198 2024-12-12T19:33:33,733 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee4a0658e31744cf9f976bb4d3a17748, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1734032010391 2024-12-12T19:33:33,739 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting db5b94e077a340eba52ee3571f17d741, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1734032010391 2024-12-12T19:33:33,740 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dd2eeb927a9445b9ea1b67d032487ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734032011886 2024-12-12T19:33:33,740 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d2052c28fe44b5d97ad2bdd4818697f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734032011886 2024-12-12T19:33:33,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:33,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T19:33:33,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:33,750 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:33:33,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:33,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:33,760 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:33,760 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/63b4a497f9aa4b5bafe99db1bf929060 is 50, key is test_row_0/B:col10/1734032013014/Put/seqid=0 2024-12-12T19:33:33,765 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#196 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:33,765 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/4ac5a5d13cc441fcaa1ca8c42102881b is 50, key is test_row_0/A:col10/1734032013014/Put/seqid=0 2024-12-12T19:33:33,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/1fe00b260c0b4472b1bcf440e93a1d5c is 50, key is test_row_0/A:col10/1734032013041/Put/seqid=0 2024-12-12T19:33:33,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742062_1238 (size=12663) 2024-12-12T19:33:33,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742064_1240 (size=12663) 2024-12-12T19:33:33,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742063_1239 (size=12151) 2024-12-12T19:33:33,823 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/63b4a497f9aa4b5bafe99db1bf929060 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63b4a497f9aa4b5bafe99db1bf929060 2024-12-12T19:33:33,847 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into 63b4a497f9aa4b5bafe99db1bf929060(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
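[Annotation] The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries describe ExploringCompactionPolicy scanning contiguous runs of store files and keeping only runs in which no single file is larger than the compaction ratio times the combined size of the other files in the run. A simplified, self-contained sketch of that ratio check (not the actual HBase implementation, which also enforces min/max file counts and size caps):

    import java.util.List;

    public class RatioCheckSketch {
        /** True if every file in the run is <= ratio * (sum of the other files' sizes). */
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;   // one file dominates the run; compacting it now is wasteful
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes roughly matching the three HFiles selected above (12.3 K + 9.5 K + 11.9 K ~ 33.7 K).
            List<Long> run = List.of(12_600L, 9_700L, 12_150L);
            System.out.println(filesInRatio(run, 1.2));   // 1.2 is the default hbase.hstore.compaction.ratio
        }
    }

All three candidate files pass the check here, so the policy compacts the whole run, which is why each store's three files are merged into one in the entries that follow.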
2024-12-12T19:33:33,847 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:33,847 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032013729; duration=0sec 2024-12-12T19:33:33,847 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:33,847 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:33,847 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:33,852 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:33,852 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:33,852 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:33,852 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9972ff0eff4d45879c7585128736b556, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/3422996b27ec4a648e40c9649b774b6b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d8224b88f7524cf19a8af5b07e5d3132] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=33.7 K 2024-12-12T19:33:33,853 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9972ff0eff4d45879c7585128736b556, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734032009198 2024-12-12T19:33:33,856 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 3422996b27ec4a648e40c9649b774b6b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1734032010391 2024-12-12T19:33:33,858 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d8224b88f7524cf19a8af5b07e5d3132, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734032011886 2024-12-12T19:33:33,916 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f61d5e1e16d46799d7435009ba841107#C#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:33,917 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/afeaf1a2b8514be984967c5e527074c2 is 50, key is test_row_0/C:col10/1734032013014/Put/seqid=0 2024-12-12T19:33:33,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742065_1241 (size=12663) 2024-12-12T19:33:33,954 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/afeaf1a2b8514be984967c5e527074c2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/afeaf1a2b8514be984967c5e527074c2 2024-12-12T19:33:33,962 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into afeaf1a2b8514be984967c5e527074c2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:33,963 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:33,963 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032013730; duration=0sec 2024-12-12T19:33:33,963 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:33,963 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:34,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T19:33:34,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:34,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
as already flushing 2024-12-12T19:33:34,215 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/1fe00b260c0b4472b1bcf440e93a1d5c 2024-12-12T19:33:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032074218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032074219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032074223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032074228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,230 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/4ac5a5d13cc441fcaa1ca8c42102881b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/4ac5a5d13cc441fcaa1ca8c42102881b 2024-12-12T19:33:34,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a78dbc9348ab4eeda94dc2e88ca5c5a8 is 50, key is test_row_0/B:col10/1734032013041/Put/seqid=0 2024-12-12T19:33:34,256 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 4ac5a5d13cc441fcaa1ca8c42102881b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
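
The RegionTooBusyException entries above are the region server refusing writes while the memstore of f61d5e1e16d46799d7435009ba841107 sits over its blocking limit (512.0 K in this test configuration); the HBase client absorbs most of these through its own retry loop (the RpcRetryingCallerImpl entries further down, driven by hbase.client.retries.number and hbase.client.pause). Purely as an illustration, not part of this test, a writer that layers its own backoff on top of that could look like the sketch below; the row, family and qualifier are taken from the log above, while the cell value, attempt count and backoff numbers are assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // family "A", qualifier "col10" as in the flushed cells above; the value is an assumed placeholder
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                       // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                          // the client retries internally first
          break;
        } catch (IOException busy) {               // e.g. RegionTooBusyException once those retries are exhausted
          if (attempt == 5) {
            throw busy;                            // give up after the assumed attempt budget
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;                          // simple exponential backoff between application-level attempts
        }
      }
    }
  }
}

In the run above no such application-level handling exists: the AcidGuaranteesTestTool writer simply calls HTable.put and relies on the built-in retries, which is why the same exception recurs until the flush at sequenceid=231 drains the memstore.
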
2024-12-12T19:33:34,256 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:34,256 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032013729; duration=0sec 2024-12-12T19:33:34,256 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:34,256 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:34,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742066_1242 (size=12151) 2024-12-12T19:33:34,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032074327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032074327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032074339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032074346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032074532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032074544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032074548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032074548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,553 DEBUG [Thread-921 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:34,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032074550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,680 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a78dbc9348ab4eeda94dc2e88ca5c5a8 2024-12-12T19:33:34,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/21372d604a7846998c20ec6e91c21bdc is 50, key is test_row_0/C:col10/1734032013041/Put/seqid=0 2024-12-12T19:33:34,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742067_1243 (size=12151) 2024-12-12T19:33:34,739 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/21372d604a7846998c20ec6e91c21bdc 2024-12-12T19:33:34,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/1fe00b260c0b4472b1bcf440e93a1d5c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1fe00b260c0b4472b1bcf440e93a1d5c 2024-12-12T19:33:34,748 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1fe00b260c0b4472b1bcf440e93a1d5c, entries=150, sequenceid=231, filesize=11.9 K 2024-12-12T19:33:34,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a78dbc9348ab4eeda94dc2e88ca5c5a8 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a78dbc9348ab4eeda94dc2e88ca5c5a8 2024-12-12T19:33:34,761 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a78dbc9348ab4eeda94dc2e88ca5c5a8, entries=150, sequenceid=231, filesize=11.9 K 2024-12-12T19:33:34,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/21372d604a7846998c20ec6e91c21bdc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/21372d604a7846998c20ec6e91c21bdc 2024-12-12T19:33:34,767 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/21372d604a7846998c20ec6e91c21bdc, entries=150, sequenceid=231, filesize=11.9 K 2024-12-12T19:33:34,768 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f61d5e1e16d46799d7435009ba841107 in 1018ms, sequenceid=231, compaction requested=false 2024-12-12T19:33:34,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:34,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:34,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-12T19:33:34,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-12T19:33:34,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-12T19:33:34,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8120 sec 2024-12-12T19:33:34,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.8190 sec 2024-12-12T19:33:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:34,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:33:34,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:34,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:34,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:34,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:34,845 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:34,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:34,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/210f2d7a2d6c49f892c515350ee7b5cb is 50, key is test_row_0/A:col10/1734032014217/Put/seqid=0 2024-12-12T19:33:34,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032074855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032074855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032074855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032074865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742068_1244 (size=12201) 2024-12-12T19:33:34,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032074956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032074963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:34,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032074967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T19:33:35,058 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-12T19:33:35,067 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:35,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-12T19:33:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T19:33:35,069 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:35,070 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:35,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:35,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032075159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032075168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T19:33:35,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032075171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,222 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:35,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:35,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:35,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/210f2d7a2d6c49f892c515350ee7b5cb 2024-12-12T19:33:35,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/d51aaf20ffac4c56ad86ec1d1837122f is 50, key is test_row_0/B:col10/1734032014217/Put/seqid=0 2024-12-12T19:33:35,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742069_1245 (size=12201) 2024-12-12T19:33:35,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T19:33:35,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032075371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,374 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:35,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:35,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:35,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032075462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032075473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032075475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,532 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:35,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:35,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:35,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:35,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T19:33:35,691 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:35,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:35,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:35,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:35,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:35,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/d51aaf20ffac4c56ad86ec1d1837122f 2024-12-12T19:33:35,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/4150a0647e6243a993a8b6ea3924ad9e is 50, key is test_row_0/C:col10/1734032014217/Put/seqid=0 2024-12-12T19:33:35,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742070_1246 (size=12201) 2024-12-12T19:33:35,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:35,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:35,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:35,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:35,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:35,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032075978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032075979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:35,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:35,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032075980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:36,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:36,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:36,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:36,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:36,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:36,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:36,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/4150a0647e6243a993a8b6ea3924ad9e 2024-12-12T19:33:36,174 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:36,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:36,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:36,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:36,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:36,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:36,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:36,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T19:33:36,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/210f2d7a2d6c49f892c515350ee7b5cb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/210f2d7a2d6c49f892c515350ee7b5cb 2024-12-12T19:33:36,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/210f2d7a2d6c49f892c515350ee7b5cb, entries=150, sequenceid=259, filesize=11.9 K 2024-12-12T19:33:36,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/d51aaf20ffac4c56ad86ec1d1837122f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/d51aaf20ffac4c56ad86ec1d1837122f 2024-12-12T19:33:36,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/d51aaf20ffac4c56ad86ec1d1837122f, entries=150, sequenceid=259, filesize=11.9 K 2024-12-12T19:33:36,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/4150a0647e6243a993a8b6ea3924ad9e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4150a0647e6243a993a8b6ea3924ad9e 2024-12-12T19:33:36,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4150a0647e6243a993a8b6ea3924ad9e, entries=150, sequenceid=259, filesize=11.9 K 2024-12-12T19:33:36,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f61d5e1e16d46799d7435009ba841107 in 1456ms, sequenceid=259, compaction requested=true 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:36,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:33:36,303 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:36,303 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:36,313 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:36,313 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:36,313 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:36,313 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:36,313 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:36,313 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:36,313 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/4ac5a5d13cc441fcaa1ca8c42102881b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1fe00b260c0b4472b1bcf440e93a1d5c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/210f2d7a2d6c49f892c515350ee7b5cb] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.1 K 2024-12-12T19:33:36,313 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/afeaf1a2b8514be984967c5e527074c2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/21372d604a7846998c20ec6e91c21bdc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4150a0647e6243a993a8b6ea3924ad9e] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.1 K 2024-12-12T19:33:36,319 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ac5a5d13cc441fcaa1ca8c42102881b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734032011886 2024-12-12T19:33:36,321 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting afeaf1a2b8514be984967c5e527074c2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734032011886 2024-12-12T19:33:36,324 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fe00b260c0b4472b1bcf440e93a1d5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734032013030 2024-12-12T19:33:36,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,331 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 21372d604a7846998c20ec6e91c21bdc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734032013030 2024-12-12T19:33:36,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T19:33:36,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:36,333 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:33:36,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:36,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:36,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:36,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:36,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:36,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:36,334 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 210f2d7a2d6c49f892c515350ee7b5cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1734032014215 2024-12-12T19:33:36,344 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4150a0647e6243a993a8b6ea3924ad9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1734032014215 2024-12-12T19:33:36,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/1f4f905e6d78459c8b46422ede3d7dac is 50, key is test_row_0/A:col10/1734032014851/Put/seqid=0 2024-12-12T19:33:36,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742071_1247 (size=9857) 2024-12-12T19:33:36,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
as already flushing 2024-12-12T19:33:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:36,394 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/1f4f905e6d78459c8b46422ede3d7dac 2024-12-12T19:33:36,395 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#205 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:36,396 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/0f9fa9ef08b94e10addf012ba03bbc93 is 50, key is test_row_0/A:col10/1734032014217/Put/seqid=0 2024-12-12T19:33:36,398 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#206 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:36,399 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/f0f3b657de25483b8f89a498e5c4aa2e is 50, key is test_row_0/C:col10/1734032014217/Put/seqid=0 2024-12-12T19:33:36,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/fb1ceac8c8a94787acdf0e4d6ba95176 is 50, key is test_row_0/B:col10/1734032014851/Put/seqid=0 2024-12-12T19:33:36,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742072_1248 (size=12815) 2024-12-12T19:33:36,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742073_1249 (size=12815) 2024-12-12T19:33:36,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742074_1250 (size=9857) 2024-12-12T19:33:36,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:36,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032076571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:36,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032076680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:36,887 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/0f9fa9ef08b94e10addf012ba03bbc93 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/0f9fa9ef08b94e10addf012ba03bbc93 2024-12-12T19:33:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032076885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,892 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/f0f3b657de25483b8f89a498e5c4aa2e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f0f3b657de25483b8f89a498e5c4aa2e 2024-12-12T19:33:36,897 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/fb1ceac8c8a94787acdf0e4d6ba95176 2024-12-12T19:33:36,908 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into f0f3b657de25483b8f89a498e5c4aa2e(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:36,908 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:36,908 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032016300; duration=0sec 2024-12-12T19:33:36,908 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:36,908 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:36,908 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:36,909 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 0f9fa9ef08b94e10addf012ba03bbc93(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:36,909 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:36,909 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032016300; duration=0sec 2024-12-12T19:33:36,909 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:36,909 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:36,915 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:36,915 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:36,915 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:36,915 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63b4a497f9aa4b5bafe99db1bf929060, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a78dbc9348ab4eeda94dc2e88ca5c5a8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/d51aaf20ffac4c56ad86ec1d1837122f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.1 K 2024-12-12T19:33:36,916 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 63b4a497f9aa4b5bafe99db1bf929060, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734032011886 2024-12-12T19:33:36,921 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a78dbc9348ab4eeda94dc2e88ca5c5a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734032013030 2024-12-12T19:33:36,922 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d51aaf20ffac4c56ad86ec1d1837122f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1734032014215 2024-12-12T19:33:36,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/14b8dbca7ecc4e7f84e2432c732ae00b is 50, key is test_row_0/C:col10/1734032014851/Put/seqid=0 2024-12-12T19:33:36,937 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#209 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:36,937 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/77ecb336a1cc470fb7b2336b3839a636 is 50, key is test_row_0/B:col10/1734032014217/Put/seqid=0 2024-12-12T19:33:36,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742075_1251 (size=9857) 2024-12-12T19:33:36,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032076988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032076989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:36,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742076_1252 (size=12815) 2024-12-12T19:33:36,992 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/14b8dbca7ecc4e7f84e2432c732ae00b 2024-12-12T19:33:36,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032076994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/1f4f905e6d78459c8b46422ede3d7dac as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1f4f905e6d78459c8b46422ede3d7dac 2024-12-12T19:33:37,038 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1f4f905e6d78459c8b46422ede3d7dac, entries=100, sequenceid=270, filesize=9.6 K 2024-12-12T19:33:37,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/fb1ceac8c8a94787acdf0e4d6ba95176 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/fb1ceac8c8a94787acdf0e4d6ba95176 2024-12-12T19:33:37,043 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/fb1ceac8c8a94787acdf0e4d6ba95176, entries=100, sequenceid=270, filesize=9.6 K 2024-12-12T19:33:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/14b8dbca7ecc4e7f84e2432c732ae00b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/14b8dbca7ecc4e7f84e2432c732ae00b 2024-12-12T19:33:37,050 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/14b8dbca7ecc4e7f84e2432c732ae00b, entries=100, sequenceid=270, filesize=9.6 K 2024-12-12T19:33:37,051 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f61d5e1e16d46799d7435009ba841107 in 718ms, sequenceid=270, compaction requested=false 2024-12-12T19:33:37,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:37,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-12T19:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-12T19:33:37,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-12T19:33:37,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9870 sec 2024-12-12T19:33:37,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.9920 sec 2024-12-12T19:33:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T19:33:37,179 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-12T19:33:37,181 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-12T19:33:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T19:33:37,186 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:37,186 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:37,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:37,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:37,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:33:37,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:37,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:37,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:37,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:37,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:37,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:37,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/92de0add4a444f978a380f4cff195fd4 is 50, key is test_row_0/A:col10/1734032016555/Put/seqid=0 2024-12-12T19:33:37,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:37,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032077220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742077_1253 (size=12301) 2024-12-12T19:33:37,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/92de0add4a444f978a380f4cff195fd4 2024-12-12T19:33:37,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/27b7f332c003476fa70425cff511f5c7 is 50, key is test_row_0/B:col10/1734032016555/Put/seqid=0 2024-12-12T19:33:37,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742078_1254 (size=12301) 2024-12-12T19:33:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T19:33:37,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032077331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,338 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T19:33:37,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:37,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:37,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,403 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/77ecb336a1cc470fb7b2336b3839a636 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/77ecb336a1cc470fb7b2336b3839a636 2024-12-12T19:33:37,418 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into 77ecb336a1cc470fb7b2336b3839a636(size=12.5 K), total size for store is 22.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:37,418 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:37,418 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032016300; duration=0sec 2024-12-12T19:33:37,418 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:37,418 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T19:33:37,505 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T19:33:37,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:37,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:37,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:37,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032077534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/27b7f332c003476fa70425cff511f5c7 2024-12-12T19:33:37,658 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9aadc8e08c47468f9e642b60618c637e is 50, key is test_row_0/C:col10/1734032016555/Put/seqid=0 2024-12-12T19:33:37,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T19:33:37,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:37,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742079_1255 (size=12301) 2024-12-12T19:33:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:37,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9aadc8e08c47468f9e642b60618c637e 2024-12-12T19:33:37,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/92de0add4a444f978a380f4cff195fd4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/92de0add4a444f978a380f4cff195fd4 2024-12-12T19:33:37,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/92de0add4a444f978a380f4cff195fd4, entries=150, sequenceid=298, filesize=12.0 K 2024-12-12T19:33:37,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/27b7f332c003476fa70425cff511f5c7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/27b7f332c003476fa70425cff511f5c7 2024-12-12T19:33:37,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/27b7f332c003476fa70425cff511f5c7, entries=150, 
sequenceid=298, filesize=12.0 K 2024-12-12T19:33:37,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9aadc8e08c47468f9e642b60618c637e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9aadc8e08c47468f9e642b60618c637e 2024-12-12T19:33:37,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9aadc8e08c47468f9e642b60618c637e, entries=150, sequenceid=298, filesize=12.0 K 2024-12-12T19:33:37,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f61d5e1e16d46799d7435009ba841107 in 499ms, sequenceid=298, compaction requested=true 2024-12-12T19:33:37,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:37,692 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:37,692 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:37,697 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:37,697 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:37,698 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in 
TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,698 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/0f9fa9ef08b94e10addf012ba03bbc93, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1f4f905e6d78459c8b46422ede3d7dac, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/92de0add4a444f978a380f4cff195fd4] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=34.2 K 2024-12-12T19:33:37,698 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:37,698 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:37,698 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,698 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f9fa9ef08b94e10addf012ba03bbc93, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1734032014215 2024-12-12T19:33:37,698 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/77ecb336a1cc470fb7b2336b3839a636, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/fb1ceac8c8a94787acdf0e4d6ba95176, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/27b7f332c003476fa70425cff511f5c7] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=34.2 K 2024-12-12T19:33:37,698 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f4f905e6d78459c8b46422ede3d7dac, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734032014851 2024-12-12T19:33:37,698 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 77ecb336a1cc470fb7b2336b3839a636, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1734032014215 2024-12-12T19:33:37,698 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92de0add4a444f978a380f4cff195fd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1734032016555 2024-12-12T19:33:37,699 
DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting fb1ceac8c8a94787acdf0e4d6ba95176, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734032014851 2024-12-12T19:33:37,699 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 27b7f332c003476fa70425cff511f5c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1734032016555 2024-12-12T19:33:37,720 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#213 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:37,720 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/b1dabc9bd8074bf2b1e32eef3c359e4f is 50, key is test_row_0/A:col10/1734032016555/Put/seqid=0 2024-12-12T19:33:37,735 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#214 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:37,735 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/e681a42e32c44e5ab95e3da598a0035b is 50, key is test_row_0/B:col10/1734032016555/Put/seqid=0 2024-12-12T19:33:37,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742081_1257 (size=13017) 2024-12-12T19:33:37,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742080_1256 (size=13017) 2024-12-12T19:33:37,786 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/e681a42e32c44e5ab95e3da598a0035b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e681a42e32c44e5ab95e3da598a0035b 2024-12-12T19:33:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T19:33:37,802 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into e681a42e32c44e5ab95e3da598a0035b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:37,803 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:37,803 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032017692; duration=0sec 2024-12-12T19:33:37,803 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:37,803 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:37,803 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:37,804 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:37,804 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:37,805 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,805 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f0f3b657de25483b8f89a498e5c4aa2e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/14b8dbca7ecc4e7f84e2432c732ae00b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9aadc8e08c47468f9e642b60618c637e] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=34.2 K 2024-12-12T19:33:37,805 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f0f3b657de25483b8f89a498e5c4aa2e, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1734032014215 2024-12-12T19:33:37,816 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 14b8dbca7ecc4e7f84e2432c732ae00b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734032014851 2024-12-12T19:33:37,816 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aadc8e08c47468f9e642b60618c637e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1734032016555 2024-12-12T19:33:37,817 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:37,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T19:33:37,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:37,819 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T19:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:37,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a2b6061a5a9540899b030e1cd85f6ad8 is 50, key is test_row_0/A:col10/1734032017213/Put/seqid=0 2024-12-12T19:33:37,842 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:37,842 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/0224bb4081bc4a6f964dbf0e6e10e2c4 is 50, key is test_row_0/C:col10/1734032016555/Put/seqid=0 2024-12-12T19:33:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
as already flushing 2024-12-12T19:33:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:37,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742082_1258 (size=12301) 2024-12-12T19:33:37,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742083_1259 (size=13017) 2024-12-12T19:33:37,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032077926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:38,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:38,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032078028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:38,177 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/b1dabc9bd8074bf2b1e32eef3c359e4f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/b1dabc9bd8074bf2b1e32eef3c359e4f 2024-12-12T19:33:38,180 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into b1dabc9bd8074bf2b1e32eef3c359e4f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:38,180 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:38,180 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032017691; duration=0sec 2024-12-12T19:33:38,180 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:38,180 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:38,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:38,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032078231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:38,277 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a2b6061a5a9540899b030e1cd85f6ad8 2024-12-12T19:33:38,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/bc253ce42f2d49ffa7d6ba241b1b9ec4 is 50, key is test_row_0/B:col10/1734032017213/Put/seqid=0 2024-12-12T19:33:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T19:33:38,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742084_1260 (size=12301) 2024-12-12T19:33:38,310 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/0224bb4081bc4a6f964dbf0e6e10e2c4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/0224bb4081bc4a6f964dbf0e6e10e2c4 2024-12-12T19:33:38,318 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 0224bb4081bc4a6f964dbf0e6e10e2c4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:38,318 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:38,318 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032017692; duration=0sec 2024-12-12T19:33:38,318 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:38,318 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032078532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:38,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:38,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37380 deadline: 1734032078587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:38,590 DEBUG [Thread-921 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:38,699 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/bc253ce42f2d49ffa7d6ba241b1b9ec4 2024-12-12T19:33:38,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 
{event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/50d9d15a2620464fa8cb1b8aa10e44b0 is 50, key is test_row_0/C:col10/1734032017213/Put/seqid=0 2024-12-12T19:33:38,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742085_1261 (size=12301) 2024-12-12T19:33:38,740 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/50d9d15a2620464fa8cb1b8aa10e44b0 2024-12-12T19:33:38,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a2b6061a5a9540899b030e1cd85f6ad8 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a2b6061a5a9540899b030e1cd85f6ad8 2024-12-12T19:33:38,757 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a2b6061a5a9540899b030e1cd85f6ad8, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T19:33:38,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/bc253ce42f2d49ffa7d6ba241b1b9ec4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bc253ce42f2d49ffa7d6ba241b1b9ec4 2024-12-12T19:33:38,770 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bc253ce42f2d49ffa7d6ba241b1b9ec4, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T19:33:38,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/50d9d15a2620464fa8cb1b8aa10e44b0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/50d9d15a2620464fa8cb1b8aa10e44b0 2024-12-12T19:33:38,783 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/50d9d15a2620464fa8cb1b8aa10e44b0, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T19:33:38,784 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for f61d5e1e16d46799d7435009ba841107 in 965ms, sequenceid=310, compaction requested=false 2024-12-12T19:33:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-12T19:33:38,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-12T19:33:38,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-12T19:33:38,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6090 sec 2024-12-12T19:33:38,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.6240 sec 2024-12-12T19:33:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:39,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T19:33:39,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:39,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:39,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:39,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:39,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:39,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:39,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032079032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a806e65a5a8c4210a483f2b7969f5387 is 50, key is test_row_0/A:col10/1734032019010/Put/seqid=0 2024-12-12T19:33:39,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032079035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032079035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032079032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742086_1262 (size=14741) 2024-12-12T19:33:39,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a806e65a5a8c4210a483f2b7969f5387 2024-12-12T19:33:39,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/836046af34124c9a843f244542562b6a is 50, key is test_row_0/B:col10/1734032019010/Put/seqid=0 2024-12-12T19:33:39,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742087_1263 (size=12301) 2024-12-12T19:33:39,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/836046af34124c9a843f244542562b6a 2024-12-12T19:33:39,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032079140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/8fc1cffc43d94523913c73711cd26bf0 is 50, key is test_row_0/C:col10/1734032019010/Put/seqid=0 2024-12-12T19:33:39,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032079152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032079154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742088_1264 (size=12301) 2024-12-12T19:33:39,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T19:33:39,291 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-12T19:33:39,294 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-12T19:33:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T19:33:39,303 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:39,307 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:39,307 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:39,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032079352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032079354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032079370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T19:33:39,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T19:33:39,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:39,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:39,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:39,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/8fc1cffc43d94523913c73711cd26bf0 2024-12-12T19:33:39,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T19:33:39,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a806e65a5a8c4210a483f2b7969f5387 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a806e65a5a8c4210a483f2b7969f5387 2024-12-12T19:33:39,651 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T19:33:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
as already flushing 2024-12-12T19:33:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:39,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:39,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:39,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a806e65a5a8c4210a483f2b7969f5387, entries=200, sequenceid=340, filesize=14.4 K 2024-12-12T19:33:39,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/836046af34124c9a843f244542562b6a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/836046af34124c9a843f244542562b6a 2024-12-12T19:33:39,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032079658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032079664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:39,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032079674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/836046af34124c9a843f244542562b6a, entries=150, sequenceid=340, filesize=12.0 K 2024-12-12T19:33:39,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/8fc1cffc43d94523913c73711cd26bf0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/8fc1cffc43d94523913c73711cd26bf0 2024-12-12T19:33:39,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/8fc1cffc43d94523913c73711cd26bf0, entries=150, sequenceid=340, filesize=12.0 K 2024-12-12T19:33:39,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for f61d5e1e16d46799d7435009ba841107 in 714ms, sequenceid=340, compaction requested=true 2024-12-12T19:33:39,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:39,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:39,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:39,727 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-12T19:33:39,727 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:39,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:39,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:39,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:39,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:39,736 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:39,736 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:39,736 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,736 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e681a42e32c44e5ab95e3da598a0035b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bc253ce42f2d49ffa7d6ba241b1b9ec4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/836046af34124c9a843f244542562b6a] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.7 K 2024-12-12T19:33:39,739 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:39,739 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:39,739 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:39,740 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e681a42e32c44e5ab95e3da598a0035b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1734032016555 2024-12-12T19:33:39,740 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/b1dabc9bd8074bf2b1e32eef3c359e4f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a2b6061a5a9540899b030e1cd85f6ad8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a806e65a5a8c4210a483f2b7969f5387] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=39.1 K 2024-12-12T19:33:39,747 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1dabc9bd8074bf2b1e32eef3c359e4f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1734032016555 2024-12-12T19:33:39,747 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bc253ce42f2d49ffa7d6ba241b1b9ec4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734032017208 2024-12-12T19:33:39,748 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2b6061a5a9540899b030e1cd85f6ad8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734032017208 2024-12-12T19:33:39,749 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 836046af34124c9a843f244542562b6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734032017919 2024-12-12T19:33:39,749 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a806e65a5a8c4210a483f2b7969f5387, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734032017898 2024-12-12T19:33:39,787 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#222 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:39,787 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a78c7fe064594e0e8144ebc2bffdafba is 50, key is test_row_0/A:col10/1734032019010/Put/seqid=0 2024-12-12T19:33:39,801 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#223 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:39,802 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/ffb3207c11d548a58f4c7b02a968b25f is 50, key is test_row_0/B:col10/1734032019010/Put/seqid=0 2024-12-12T19:33:39,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:39,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T19:33:39,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,811 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T19:33:39,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:39,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:39,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:39,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:39,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:39,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:39,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742089_1265 (size=13119) 2024-12-12T19:33:39,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c9081da0a2f641469b2dfc2e74cc6168 is 50, key is test_row_0/A:col10/1734032019029/Put/seqid=0 2024-12-12T19:33:39,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742090_1266 (size=13119) 2024-12-12T19:33:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742091_1267 
(size=12301) 2024-12-12T19:33:39,903 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/ffb3207c11d548a58f4c7b02a968b25f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/ffb3207c11d548a58f4c7b02a968b25f 2024-12-12T19:33:39,905 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/a78c7fe064594e0e8144ebc2bffdafba as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a78c7fe064594e0e8144ebc2bffdafba 2024-12-12T19:33:39,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T19:33:39,936 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into a78c7fe064594e0e8144ebc2bffdafba(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:39,936 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:39,936 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032019727; duration=0sec 2024-12-12T19:33:39,936 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:39,936 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:39,936 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:39,943 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into ffb3207c11d548a58f4c7b02a968b25f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
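The throughput-controller entries above report an average of 6.55 MB/second against a 50.00 MB/second limit with zero sleeps, which is what you would expect for a ~37 KB compaction that finishes almost immediately. The sketch below illustrates that style of throttling under a simple assumption: sleep whenever the bytes written so far run ahead of the per-second allowance. It is not the PressureAwareThroughputController source; the class name, chunk size, and sleep formula are invented for illustration.

```java
// Illustrative sketch, NOT the PressureAwareThroughputController implementation:
// throttle a writer to a bytes-per-second limit by sleeping when it gets ahead.
public class ThroughputThrottleSketch {

    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;

    ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Called after each chunk written during the compaction.
    void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double allowedBytes = limitBytesPerSec * elapsedSec;
        if (bytesWritten > allowedBytes) {
            // Sleep just long enough for the allowance to catch up with what was written.
            long sleepMs = (long) (((bytesWritten - allowedBytes) / limitBytesPerSec) * 1000);
            Thread.sleep(Math.max(1, sleepMs));
        }
    }

    double averageThroughputMBps() {
        double elapsedSec = Math.max(1e-9, (System.nanoTime() - startNanos) / 1e9);
        return bytesWritten / elapsedSec / (1024 * 1024);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch t = new ThroughputThrottleSketch(50 * 1024 * 1024); // 50 MB/s limit
        for (int i = 0; i < 10; i++) {
            t.control(4 * 1024 * 1024); // pretend we wrote a 4 MB chunk
        }
        System.out.printf("average throughput is %.2f MB/second%n", t.averageThroughputMBps());
    }
}
```

With writes this small, the allowance is never exhausted, so the controller simply reports the measured average (bytes written divided by elapsed time) and never sleeps, matching the "slept 0 time(s)" lines above.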
2024-12-12T19:33:39,943 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:39,943 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032019727; duration=0sec 2024-12-12T19:33:39,943 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:39,944 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:39,945 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:39,945 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:39,945 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:39,946 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/0224bb4081bc4a6f964dbf0e6e10e2c4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/50d9d15a2620464fa8cb1b8aa10e44b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/8fc1cffc43d94523913c73711cd26bf0] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.7 K 2024-12-12T19:33:39,946 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0224bb4081bc4a6f964dbf0e6e10e2c4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1734032016555 2024-12-12T19:33:39,946 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50d9d15a2620464fa8cb1b8aa10e44b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734032017208 2024-12-12T19:33:39,947 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fc1cffc43d94523913c73711cd26bf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734032017919 2024-12-12T19:33:39,961 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:39,962 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9a248cbf9e4947df9b6607053b273594 is 50, key is test_row_0/C:col10/1734032019010/Put/seqid=0 2024-12-12T19:33:39,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742092_1268 (size=13119) 2024-12-12T19:33:40,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:40,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032080189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032080191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032080191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032080199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032080296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032080297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032080298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,307 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c9081da0a2f641469b2dfc2e74cc6168 2024-12-12T19:33:40,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032080304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a70fa653705f43f5a2ff4cbd468147fe is 50, key is test_row_0/B:col10/1734032019029/Put/seqid=0 2024-12-12T19:33:40,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742093_1269 (size=12301) 2024-12-12T19:33:40,407 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a70fa653705f43f5a2ff4cbd468147fe 2024-12-12T19:33:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T19:33:40,435 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/9a248cbf9e4947df9b6607053b273594 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9a248cbf9e4947df9b6607053b273594 2024-12-12T19:33:40,441 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 9a248cbf9e4947df9b6607053b273594(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
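The WARN/DEBUG pairs above show the region server pushing back on writers while the flush is still in flight: each Mutate call is rejected with RegionTooBusyException because the region's memstore has grown past its blocking limit of 512.0 K. Below is a minimal sketch of that resource check, assuming the commonly documented blocking limit of flush size × block multiplier; the real check lives in HRegion.checkResources, and the property values here are chosen only so the product matches the 512 K figure seen in this run, not read from the test configuration.

```java
// Illustrative sketch, NOT HRegion.checkResources: reject writes while the region's
// memstore is over its blocking limit, so clients back off until the flush frees memory.
public class MemstorePressureSketch {

    // Values mirror the standard settings hbase.hregion.memstore.flush.size and
    // hbase.hregion.memstore.block.multiplier, sized here (hypothetically) to give 512 KB.
    static final long FLUSH_SIZE = 128 * 1024;
    static final long BLOCK_MULTIPLIER = 4;
    static final long BLOCKING_LIMIT = FLUSH_SIZE * BLOCK_MULTIPLIER; // 512 KB

    // Stand-in for org.apache.hadoop.hbase.RegionTooBusyException, for a self-contained example.
    static class RegionTooBusyException extends RuntimeException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    static void checkResources(long memstoreDataSize, String regionName) {
        if (memstoreDataSize > BLOCKING_LIMIT) {
            throw new RegionTooBusyException(
                "Over memstore limit=" + (BLOCKING_LIMIT / 1024) + ".0 K, regionName=" + regionName);
        }
    }

    public static void main(String[] args) {
        try {
            // 600 KB of un-flushed data exceeds the 512 KB blocking limit.
            checkResources(600 * 1024, "f61d5e1e16d46799d7435009ba841107");
        } catch (RegionTooBusyException e) {
            System.out.println("rejected write: " + e.getMessage());
        }
    }
}
```

The repeated Mutate entries from the same connections, each with a fresh callId and deadline, are consistent with the client retrying these rejected writes until the flush completes and the memstore drops back under the limit.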
2024-12-12T19:33:40,442 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:40,442 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032019728; duration=0sec 2024-12-12T19:33:40,442 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:40,442 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:40,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/ab3a26d817e6434eab7d578fbfb16485 is 50, key is test_row_0/C:col10/1734032019029/Put/seqid=0 2024-12-12T19:33:40,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742094_1270 (size=12301) 2024-12-12T19:33:40,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032080504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032080507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032080507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032080518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032080807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032080820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032080824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:40,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032080828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:40,889 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/ab3a26d817e6434eab7d578fbfb16485 2024-12-12T19:33:40,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c9081da0a2f641469b2dfc2e74cc6168 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c9081da0a2f641469b2dfc2e74cc6168 2024-12-12T19:33:40,908 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c9081da0a2f641469b2dfc2e74cc6168, entries=150, sequenceid=349, filesize=12.0 K 2024-12-12T19:33:40,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a70fa653705f43f5a2ff4cbd468147fe as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a70fa653705f43f5a2ff4cbd468147fe 2024-12-12T19:33:40,922 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a70fa653705f43f5a2ff4cbd468147fe, entries=150, sequenceid=349, filesize=12.0 K 2024-12-12T19:33:40,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/ab3a26d817e6434eab7d578fbfb16485 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ab3a26d817e6434eab7d578fbfb16485 2024-12-12T19:33:40,937 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ab3a26d817e6434eab7d578fbfb16485, entries=150, sequenceid=349, filesize=12.0 K 2024-12-12T19:33:40,938 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for f61d5e1e16d46799d7435009ba841107 in 1128ms, sequenceid=349, compaction requested=false 2024-12-12T19:33:40,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:40,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
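The flush that just finished is the region-level leg (pid=78) of a table-level flush procedure (pid=77) that the test client requested through the Admin API; the entries that follow show both procedures completing and the client's TableFuture reporting the FLUSH operation done. As a reference, this is roughly how such a flush is triggered from client code, assuming a standard HBase client setup (connection configuration elided; this is a sketch of the call, not the test's own code).

```java
// Minimal client-side sketch: ask the master to flush a table, which is what drives
// the FlushTableProcedure / FlushRegionProcedure entries seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table and waits for the operation to finish.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```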
2024-12-12T19:33:40,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-12T19:33:40,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-12T19:33:40,940 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-12T19:33:40,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6320 sec 2024-12-12T19:33:40,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.6470 sec 2024-12-12T19:33:41,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:41,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-12T19:33:41,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:41,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:41,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:41,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:41,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:41,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:41,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032081338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032081338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032081339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032081346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/40d2177ce49847ea969b3036873ff506 is 50, key is test_row_0/A:col10/1734032020189/Put/seqid=0 2024-12-12T19:33:41,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742095_1271 (size=12301) 2024-12-12T19:33:41,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/40d2177ce49847ea969b3036873ff506 2024-12-12T19:33:41,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/6c61df817409410fa30c1a2451fe70fc is 50, key is test_row_0/B:col10/1734032020189/Put/seqid=0 2024-12-12T19:33:41,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T19:33:41,409 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-12T19:33:41,412 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:41,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-12T19:33:41,415 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T19:33:41,415 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:41,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:41,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742096_1272 (size=12301) 2024-12-12T19:33:41,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/6c61df817409410fa30c1a2451fe70fc 2024-12-12T19:33:41,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032081445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032081451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/10f1d59ddb3642e2a54caa2b8ed50500 is 50, key is test_row_0/C:col10/1734032020189/Put/seqid=0 2024-12-12T19:33:41,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742097_1273 (size=12301) 2024-12-12T19:33:41,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/10f1d59ddb3642e2a54caa2b8ed50500 2024-12-12T19:33:41,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/40d2177ce49847ea969b3036873ff506 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/40d2177ce49847ea969b3036873ff506 2024-12-12T19:33:41,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/40d2177ce49847ea969b3036873ff506, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T19:33:41,512 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-12T19:33:41,512 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-12T19:33:41,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/6c61df817409410fa30c1a2451fe70fc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/6c61df817409410fa30c1a2451fe70fc 2024-12-12T19:33:41,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T19:33:41,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/6c61df817409410fa30c1a2451fe70fc, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T19:33:41,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/10f1d59ddb3642e2a54caa2b8ed50500 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/10f1d59ddb3642e2a54caa2b8ed50500 2024-12-12T19:33:41,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/10f1d59ddb3642e2a54caa2b8ed50500, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T19:33:41,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for f61d5e1e16d46799d7435009ba841107 in 204ms, sequenceid=381, compaction requested=true 2024-12-12T19:33:41,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:41,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:41,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:41,524 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:41,524 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:41,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:41,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:41,524 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:41,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:41,525 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:41,525 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:41,525 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:41,525 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:41,525 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:41,525 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/ffb3207c11d548a58f4c7b02a968b25f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a70fa653705f43f5a2ff4cbd468147fe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/6c61df817409410fa30c1a2451fe70fc] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.8 K 2024-12-12T19:33:41,525 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:41,525 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a78c7fe064594e0e8144ebc2bffdafba, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c9081da0a2f641469b2dfc2e74cc6168, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/40d2177ce49847ea969b3036873ff506] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.8 K 2024-12-12T19:33:41,526 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ffb3207c11d548a58f4c7b02a968b25f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734032017919 2024-12-12T19:33:41,526 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a78c7fe064594e0e8144ebc2bffdafba, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734032017919 2024-12-12T19:33:41,526 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a70fa653705f43f5a2ff4cbd468147fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1734032019029 2024-12-12T19:33:41,526 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9081da0a2f641469b2dfc2e74cc6168, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1734032019029 2024-12-12T19:33:41,526 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c61df817409410fa30c1a2451fe70fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734032020185 2024-12-12T19:33:41,527 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40d2177ce49847ea969b3036873ff506, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734032020185 2024-12-12T19:33:41,533 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#231 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:41,533 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/3dee0dfb8bd0412f99edd8f8ee864141 is 50, key is test_row_0/B:col10/1734032020189/Put/seqid=0 2024-12-12T19:33:41,534 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#232 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:41,535 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/39ad6a2800814352bec0d3039172addc is 50, key is test_row_0/A:col10/1734032020189/Put/seqid=0 2024-12-12T19:33:41,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742098_1274 (size=13221) 2024-12-12T19:33:41,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742099_1275 (size=13221) 2024-12-12T19:33:41,556 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/39ad6a2800814352bec0d3039172addc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/39ad6a2800814352bec0d3039172addc 2024-12-12T19:33:41,561 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 39ad6a2800814352bec0d3039172addc(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:41,561 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:41,561 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032021524; duration=0sec 2024-12-12T19:33:41,561 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:41,561 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:41,561 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:41,562 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:41,562 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:41,562 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:41,562 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9a248cbf9e4947df9b6607053b273594, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ab3a26d817e6434eab7d578fbfb16485, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/10f1d59ddb3642e2a54caa2b8ed50500] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.8 K 2024-12-12T19:33:41,563 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a248cbf9e4947df9b6607053b273594, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734032017919 2024-12-12T19:33:41,563 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab3a26d817e6434eab7d578fbfb16485, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1734032019029 2024-12-12T19:33:41,563 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10f1d59ddb3642e2a54caa2b8ed50500, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734032020185 2024-12-12T19:33:41,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
2024-12-12T19:33:41,568 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:41,570 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#233 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:41,571 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/f4d852b2d5f74364887ebe532653d944 is 50, key is test_row_0/C:col10/1734032020189/Put/seqid=0 2024-12-12T19:33:41,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/61b149230c384f969a2468555e01a838 is 50, key is test_row_0/A:col10/1734032021329/Put/seqid=0 2024-12-12T19:33:41,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742100_1276 (size=13221) 2024-12-12T19:33:41,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742101_1277 (size=12301) 2024-12-12T19:33:41,590 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/61b149230c384f969a2468555e01a838 2024-12-12T19:33:41,597 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/f4d852b2d5f74364887ebe532653d944 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f4d852b2d5f74364887ebe532653d944 2024-12-12T19:33:41,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a12176419e9f44109864bcd4151eee1a is 50, key is test_row_0/B:col10/1734032021329/Put/seqid=0 2024-12-12T19:33:41,604 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into f4d852b2d5f74364887ebe532653d944(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:41,604 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:41,604 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032021524; duration=0sec 2024-12-12T19:33:41,604 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:41,604 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:41,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742102_1278 (size=12301) 2024-12-12T19:33:41,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:41,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. as already flushing 2024-12-12T19:33:41,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032081711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032081712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T19:33:41,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032081814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:41,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032081816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:41,959 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/3dee0dfb8bd0412f99edd8f8ee864141 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/3dee0dfb8bd0412f99edd8f8ee864141 2024-12-12T19:33:41,967 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into 3dee0dfb8bd0412f99edd8f8ee864141(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:41,968 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:41,968 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032021524; duration=0sec 2024-12-12T19:33:41,968 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:41,968 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:42,013 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a12176419e9f44109864bcd4151eee1a 2024-12-12T19:33:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T19:33:42,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032082019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032082020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/95e1e3e2a66e413082bf4c5aa1d7832d is 50, key is test_row_0/C:col10/1734032021329/Put/seqid=0 2024-12-12T19:33:42,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742103_1279 (size=12301) 2024-12-12T19:33:42,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032082326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032082326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37370 deadline: 1734032082346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37366 deadline: 1734032082357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,484 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=389 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/95e1e3e2a66e413082bf4c5aa1d7832d 2024-12-12T19:33:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/61b149230c384f969a2468555e01a838 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/61b149230c384f969a2468555e01a838 2024-12-12T19:33:42,513 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/61b149230c384f969a2468555e01a838, entries=150, sequenceid=389, filesize=12.0 K 2024-12-12T19:33:42,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/a12176419e9f44109864bcd4151eee1a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a12176419e9f44109864bcd4151eee1a 2024-12-12T19:33:42,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T19:33:42,536 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a12176419e9f44109864bcd4151eee1a, entries=150, sequenceid=389, filesize=12.0 K 2024-12-12T19:33:42,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/95e1e3e2a66e413082bf4c5aa1d7832d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/95e1e3e2a66e413082bf4c5aa1d7832d 2024-12-12T19:33:42,556 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/95e1e3e2a66e413082bf4c5aa1d7832d, entries=150, sequenceid=389, filesize=12.0 K 2024-12-12T19:33:42,563 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for f61d5e1e16d46799d7435009ba841107 in 995ms, sequenceid=389, compaction requested=false 2024-12-12T19:33:42,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:42,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:42,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-12T19:33:42,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-12T19:33:42,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-12T19:33:42,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1600 sec 2024-12-12T19:33:42,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.1650 sec 2024-12-12T19:33:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:42,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-12T19:33:42,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:42,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:42,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:42,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:42,847 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:42,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:42,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032082860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032082867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/499470307eca4e60988b8230f25b0147 is 50, key is test_row_0/A:col10/1734032021702/Put/seqid=0 2024-12-12T19:33:42,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742104_1280 (size=12301) 2024-12-12T19:33:42,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/499470307eca4e60988b8230f25b0147 2024-12-12T19:33:42,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032082965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:42,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:42,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032082973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:43,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/afaf6aecf06348cfbf67a50875c70368 is 50, key is test_row_0/B:col10/1734032021702/Put/seqid=0 2024-12-12T19:33:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742105_1281 (size=12301) 2024-12-12T19:33:43,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032083171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:43,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032083184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:43,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032083475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:43,487 DEBUG [Thread-935 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328852db to 127.0.0.1:52216 2024-12-12T19:33:43,487 DEBUG [Thread-935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:43,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/afaf6aecf06348cfbf67a50875c70368 2024-12-12T19:33:43,489 DEBUG [Thread-937 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b9e2976 to 127.0.0.1:52216 2024-12-12T19:33:43,489 DEBUG [Thread-937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:43,491 DEBUG [Thread-941 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f7f772a to 127.0.0.1:52216 2024-12-12T19:33:43,491 DEBUG [Thread-941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:43,491 DEBUG [Thread-939 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56e9a678 to 127.0.0.1:52216 2024-12-12T19:33:43,491 DEBUG [Thread-939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:43,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:43,499 DEBUG [Thread-933 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:52216 2024-12-12T19:33:43,500 DEBUG [Thread-933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032083497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:43,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/e64682114c6f430282062bb0f6b2fc64 is 50, key is test_row_0/C:col10/1734032021702/Put/seqid=0 2024-12-12T19:33:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T19:33:43,521 INFO [Thread-932 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-12T19:33:43,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742106_1282 (size=12301) 2024-12-12T19:33:43,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/e64682114c6f430282062bb0f6b2fc64 2024-12-12T19:33:43,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/499470307eca4e60988b8230f25b0147 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/499470307eca4e60988b8230f25b0147 2024-12-12T19:33:43,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:43,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37356 deadline: 1734032083987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:44,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:44,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37334 deadline: 1734032084011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:44,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/499470307eca4e60988b8230f25b0147, entries=150, sequenceid=421, filesize=12.0 K 2024-12-12T19:33:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/afaf6aecf06348cfbf67a50875c70368 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/afaf6aecf06348cfbf67a50875c70368 2024-12-12T19:33:44,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/afaf6aecf06348cfbf67a50875c70368, entries=150, sequenceid=421, filesize=12.0 K 2024-12-12T19:33:44,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/e64682114c6f430282062bb0f6b2fc64 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/e64682114c6f430282062bb0f6b2fc64 2024-12-12T19:33:44,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/e64682114c6f430282062bb0f6b2fc64, entries=150, sequenceid=421, filesize=12.0 K 2024-12-12T19:33:44,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for f61d5e1e16d46799d7435009ba841107 in 1269ms, sequenceid=421, compaction requested=true 2024-12-12T19:33:44,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:44,115 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:44,115 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:44,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:44,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:44,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:44,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f61d5e1e16d46799d7435009ba841107:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:44,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:33:44,119 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:44,125 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:44,125 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/A is initiating minor compaction (all files) 2024-12-12T19:33:44,125 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/A in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
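Note: the repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server refusing writes while the memstore drains; the HBase client retries such failures internally. A minimal, hedged sketch of the client-side knobs involved, assuming illustrative retry/pause values that are not taken from this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: give the client more headroom to ride out
    // RegionTooBusyException retries while the region flushes.
    conf.setInt("hbase.client.retries.number", 20);
    conf.setLong("hbase.client.pause", 200);              // base pause in ms between retries
    conf.setLong("hbase.client.operation.timeout", 120_000L);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retriable server exceptions such as RegionTooBusyException are retried
      // by the client up to the limits configured above before surfacing.
      table.put(put);
    }
  }
}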
2024-12-12T19:33:44,126 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/39ad6a2800814352bec0d3039172addc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/61b149230c384f969a2468555e01a838, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/499470307eca4e60988b8230f25b0147] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.9 K 2024-12-12T19:33:44,131 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:44,132 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/B is initiating minor compaction (all files) 2024-12-12T19:33:44,132 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/B in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:44,132 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/3dee0dfb8bd0412f99edd8f8ee864141, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a12176419e9f44109864bcd4151eee1a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/afaf6aecf06348cfbf67a50875c70368] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.9 K 2024-12-12T19:33:44,132 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 39ad6a2800814352bec0d3039172addc, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734032020185 2024-12-12T19:33:44,134 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 61b149230c384f969a2468555e01a838, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1734032021329 2024-12-12T19:33:44,134 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dee0dfb8bd0412f99edd8f8ee864141, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734032020185 2024-12-12T19:33:44,136 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 499470307eca4e60988b8230f25b0147, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734032021702 2024-12-12T19:33:44,137 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting a12176419e9f44109864bcd4151eee1a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1734032021329 2024-12-12T19:33:44,140 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting afaf6aecf06348cfbf67a50875c70368, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734032021702 2024-12-12T19:33:44,177 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#B#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:44,178 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/c92e586d3dff47c9971f909de256b6a6 is 50, key is test_row_0/B:col10/1734032021702/Put/seqid=0 2024-12-12T19:33:44,182 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#A#compaction#241 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:44,184 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/7d04dcaa707549aaa2a63517e4d6d94e is 50, key is test_row_0/A:col10/1734032021702/Put/seqid=0 2024-12-12T19:33:44,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742107_1283 (size=13323) 2024-12-12T19:33:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742108_1284 (size=13323) 2024-12-12T19:33:44,282 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/7d04dcaa707549aaa2a63517e4d6d94e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7d04dcaa707549aaa2a63517e4d6d94e 2024-12-12T19:33:44,289 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/c92e586d3dff47c9971f909de256b6a6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/c92e586d3dff47c9971f909de256b6a6 2024-12-12T19:33:44,300 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/B of f61d5e1e16d46799d7435009ba841107 into c92e586d3dff47c9971f909de256b6a6(size=13.0 K), total size for store is 13.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:44,300 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:44,300 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/A of f61d5e1e16d46799d7435009ba841107 into 7d04dcaa707549aaa2a63517e4d6d94e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:44,300 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:44,300 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/B, priority=13, startTime=1734032024115; duration=0sec 2024-12-12T19:33:44,300 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/A, priority=13, startTime=1734032024115; duration=0sec 2024-12-12T19:33:44,300 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:44,300 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:B 2024-12-12T19:33:44,300 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:44,303 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:44,303 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:A 2024-12-12T19:33:44,304 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:44,304 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): f61d5e1e16d46799d7435009ba841107/C is initiating minor compaction (all files) 2024-12-12T19:33:44,304 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f61d5e1e16d46799d7435009ba841107/C in TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
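Note: the compaction entries above show the exploring compaction policy selecting all three eligible store files per family once the latest flush landed a third file. A hedged sketch of requesting a compaction for one family through the Admin API; the min-file threshold value shown is an assumed typical setting, not read from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed tuning: minimum number of store files before a minor compaction
    // is considered (3 matches the number of files selected in the log above).
    conf.setInt("hbase.hstore.compaction.min", 3);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the server to compact just the 'A' family of the test table;
      // the request is queued asynchronously, like the compactions in the log.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
    }
  }
}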
2024-12-12T19:33:44,304 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f4d852b2d5f74364887ebe532653d944, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/95e1e3e2a66e413082bf4c5aa1d7832d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/e64682114c6f430282062bb0f6b2fc64] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp, totalSize=36.9 K 2024-12-12T19:33:44,307 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4d852b2d5f74364887ebe532653d944, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734032020185 2024-12-12T19:33:44,308 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95e1e3e2a66e413082bf4c5aa1d7832d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=389, earliestPutTs=1734032021329 2024-12-12T19:33:44,309 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e64682114c6f430282062bb0f6b2fc64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734032021702 2024-12-12T19:33:44,339 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f61d5e1e16d46799d7435009ba841107#C#compaction#242 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:44,340 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/39c07f48b7a741338228959524ff48e5 is 50, key is test_row_0/C:col10/1734032021702/Put/seqid=0 2024-12-12T19:33:44,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742109_1285 (size=13323) 2024-12-12T19:33:44,355 DEBUG [Thread-928 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:52216 2024-12-12T19:33:44,355 DEBUG [Thread-928 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:44,380 DEBUG [Thread-930 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:52216 2024-12-12T19:33:44,380 DEBUG [Thread-930 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:44,787 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/39c07f48b7a741338228959524ff48e5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/39c07f48b7a741338228959524ff48e5 2024-12-12T19:33:44,824 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f61d5e1e16d46799d7435009ba841107/C of f61d5e1e16d46799d7435009ba841107 into 39c07f48b7a741338228959524ff48e5(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
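Note: the "Over memstore limit=512.0 K" rejections seen throughout this run come from the per-region blocking check, which refuses updates once the memstore grows past roughly the flush size multiplied by the block multiplier. A sketch of the two server-side settings involved; the values below are illustrative assumptions, not the ones this test actually uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only. The blocking limit behind RegionTooBusyException
    // is roughly flush.size * block.multiplier, so 128 KB * 4 would give the
    // 512 KB limit reported in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("approximate blocking limit (bytes): " + blockingLimit);
  }
}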
2024-12-12T19:33:44,824 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:44,824 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107., storeName=f61d5e1e16d46799d7435009ba841107/C, priority=13, startTime=1734032024116; duration=0sec 2024-12-12T19:33:44,824 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:44,824 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f61d5e1e16d46799d7435009ba841107:C 2024-12-12T19:33:45,012 DEBUG [Thread-923 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:52216 2024-12-12T19:33:45,012 DEBUG [Thread-923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:45,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:33:45,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:45,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:45,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:45,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:45,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:45,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:45,035 DEBUG [Thread-926 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:52216 2024-12-12T19:33:45,035 DEBUG [Thread-926 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:45,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c6f4d39a5c044f00b484b2991c9dfb70 is 50, key is test_row_0/A:col10/1734032025000/Put/seqid=0 2024-12-12T19:33:45,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742110_1286 (size=12301) 2024-12-12T19:33:45,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c6f4d39a5c044f00b484b2991c9dfb70 2024-12-12T19:33:45,145 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/5ac56b9aeb164aa0b0856de903f1c5ca is 50, key is test_row_0/B:col10/1734032025000/Put/seqid=0 2024-12-12T19:33:45,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742111_1287 (size=12301) 2024-12-12T19:33:45,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/5ac56b9aeb164aa0b0856de903f1c5ca 2024-12-12T19:33:45,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/7539f536dca84d10b5df42de2363f339 is 50, key is test_row_0/C:col10/1734032025000/Put/seqid=0 2024-12-12T19:33:45,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742112_1288 (size=12301) 2024-12-12T19:33:45,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/7539f536dca84d10b5df42de2363f339 2024-12-12T19:33:45,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/c6f4d39a5c044f00b484b2991c9dfb70 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c6f4d39a5c044f00b484b2991c9dfb70 2024-12-12T19:33:45,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c6f4d39a5c044f00b484b2991c9dfb70, entries=150, sequenceid=435, filesize=12.0 K 2024-12-12T19:33:45,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/5ac56b9aeb164aa0b0856de903f1c5ca as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/5ac56b9aeb164aa0b0856de903f1c5ca 2024-12-12T19:33:45,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/5ac56b9aeb164aa0b0856de903f1c5ca, entries=150, sequenceid=435, filesize=12.0 K 2024-12-12T19:33:45,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/7539f536dca84d10b5df42de2363f339 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/7539f536dca84d10b5df42de2363f339 2024-12-12T19:33:45,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/7539f536dca84d10b5df42de2363f339, entries=150, sequenceid=435, filesize=12.0 K 2024-12-12T19:33:45,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for f61d5e1e16d46799d7435009ba841107 in 430ms, sequenceid=435, compaction requested=false 2024-12-12T19:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:48,650 DEBUG [Thread-921 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:52216 2024-12-12T19:33:48,650 DEBUG [Thread-921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 122 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4670 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4743 2024-12-12T19:33:48,650 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4714 2024-12-12T19:33:48,651 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4657 2024-12-12T19:33:48,651 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4766 2024-12-12T19:33:48,651 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T19:33:48,651 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:33:48,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:52216 2024-12-12T19:33:48,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:33:48,655 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T19:33:48,656 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T19:33:48,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:48,681 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T19:33:48,687 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032028687"}]},"ts":"1734032028687"} 2024-12-12T19:33:48,692 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T19:33:48,711 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T19:33:48,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:33:48,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, UNASSIGN}] 2024-12-12T19:33:48,717 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, UNASSIGN 2024-12-12T19:33:48,718 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=f61d5e1e16d46799d7435009ba841107, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:48,730 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:33:48,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; CloseRegionProcedure f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:33:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T19:33:48,891 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:48,895 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(124): Close f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1681): Closing f61d5e1e16d46799d7435009ba841107, disabling compactions & flushes 2024-12-12T19:33:48,896 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 
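Note: after the test tool reports its writer/reader counts, the master runs a DisableTableProcedure that unassigns the region and triggers a final flush during close, as the entries above show. A hedged sketch of the corresponding client-side call; the disabled-state guard is an assumption about typical usage rather than something taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Mirrors the DisableTableProcedure in the log: the call returns once the
      // master has closed the table's regions, flushing any remaining memstore data.
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}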
2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. after waiting 0 ms 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:48,896 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(2837): Flushing f61d5e1e16d46799d7435009ba841107 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=A 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:48,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=B 2024-12-12T19:33:48,897 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:48,897 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f61d5e1e16d46799d7435009ba841107, store=C 2024-12-12T19:33:48,897 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:48,924 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/7bc2f2cdcb884b3187474f5172bb945a is 50, key is test_row_2/A:col10/1734032028649/Put/seqid=0 2024-12-12T19:33:48,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742113_1289 (size=7415) 2024-12-12T19:33:48,972 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/7bc2f2cdcb884b3187474f5172bb945a 2024-12-12T19:33:48,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T19:33:49,005 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/f96b815852264a6f9f431a0f5c7f40f0 is 50, 
key is test_row_2/B:col10/1734032028649/Put/seqid=0 2024-12-12T19:33:49,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742114_1290 (size=7415) 2024-12-12T19:33:49,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T19:33:49,453 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/f96b815852264a6f9f431a0f5c7f40f0 2024-12-12T19:33:49,474 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/ac3d65fcf4834ae4a5762072b56c86c9 is 50, key is test_row_2/C:col10/1734032028649/Put/seqid=0 2024-12-12T19:33:49,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742115_1291 (size=7415) 2024-12-12T19:33:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T19:33:49,928 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/ac3d65fcf4834ae4a5762072b56c86c9 2024-12-12T19:33:49,935 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/A/7bc2f2cdcb884b3187474f5172bb945a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7bc2f2cdcb884b3187474f5172bb945a 2024-12-12T19:33:49,942 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7bc2f2cdcb884b3187474f5172bb945a, entries=50, sequenceid=439, filesize=7.2 K 2024-12-12T19:33:49,943 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/B/f96b815852264a6f9f431a0f5c7f40f0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/f96b815852264a6f9f431a0f5c7f40f0 2024-12-12T19:33:49,947 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 
{event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/f96b815852264a6f9f431a0f5c7f40f0, entries=50, sequenceid=439, filesize=7.2 K 2024-12-12T19:33:49,948 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/.tmp/C/ac3d65fcf4834ae4a5762072b56c86c9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ac3d65fcf4834ae4a5762072b56c86c9 2024-12-12T19:33:49,952 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ac3d65fcf4834ae4a5762072b56c86c9, entries=50, sequenceid=439, filesize=7.2 K 2024-12-12T19:33:49,953 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for f61d5e1e16d46799d7435009ba841107 in 1057ms, sequenceid=439, compaction requested=true 2024-12-12T19:33:49,953 DEBUG [StoreCloser-TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/2ae32f170e304bce8c6aabed2fead8b2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/81ab8ffd524a42a49e24fb1979d33378, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/71c94fdf8f7840a7b3a7604b4d794198, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c809b330744b42e49a4452cd5d6b4523, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/818600e51c4444e78023d1d02ea6dcf0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/cb90b828812d4be6b7093ae1f2a2a98f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/8f94e3f0b30843678b76e862cc123a61, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a9d70b10b3c0419cbf71b49af4d978d4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/840f398f7da846f59a18fd24ff422597, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/e83c7177b6334566ab6a76565c57cd7f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/3be2ac6efde840c3b160fd5a6b26b5a4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/517d6f3217634036991583e3de4a126e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/35291f7d3cc54f04b26a78eb6d5e6574, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/ee4a0658e31744cf9f976bb4d3a17748, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/4ac5a5d13cc441fcaa1ca8c42102881b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/5dd2eeb927a9445b9ea1b67d032487ba, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1fe00b260c0b4472b1bcf440e93a1d5c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/0f9fa9ef08b94e10addf012ba03bbc93, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/210f2d7a2d6c49f892c515350ee7b5cb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1f4f905e6d78459c8b46422ede3d7dac, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/b1dabc9bd8074bf2b1e32eef3c359e4f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/92de0add4a444f978a380f4cff195fd4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a2b6061a5a9540899b030e1cd85f6ad8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a806e65a5a8c4210a483f2b7969f5387, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a78c7fe064594e0e8144ebc2bffdafba, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c9081da0a2f641469b2dfc2e74cc6168, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/39ad6a2800814352bec0d3039172addc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/40d2177ce49847ea969b3036873ff506, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/61b149230c384f969a2468555e01a838, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/499470307eca4e60988b8230f25b0147] to archive 2024-12-12T19:33:49,954 DEBUG [StoreCloser-TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:33:49,957 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/81ab8ffd524a42a49e24fb1979d33378 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/81ab8ffd524a42a49e24fb1979d33378 2024-12-12T19:33:49,957 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/2ae32f170e304bce8c6aabed2fead8b2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/2ae32f170e304bce8c6aabed2fead8b2 2024-12-12T19:33:49,959 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c809b330744b42e49a4452cd5d6b4523 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c809b330744b42e49a4452cd5d6b4523 2024-12-12T19:33:49,959 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/818600e51c4444e78023d1d02ea6dcf0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/818600e51c4444e78023d1d02ea6dcf0 2024-12-12T19:33:49,959 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/cb90b828812d4be6b7093ae1f2a2a98f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/cb90b828812d4be6b7093ae1f2a2a98f 2024-12-12T19:33:49,959 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/71c94fdf8f7840a7b3a7604b4d794198 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/71c94fdf8f7840a7b3a7604b4d794198 2024-12-12T19:33:49,959 DEBUG [HFileArchiver-18 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/840f398f7da846f59a18fd24ff422597 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/840f398f7da846f59a18fd24ff422597 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/e83c7177b6334566ab6a76565c57cd7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/e83c7177b6334566ab6a76565c57cd7f 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/3be2ac6efde840c3b160fd5a6b26b5a4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/3be2ac6efde840c3b160fd5a6b26b5a4 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a9d70b10b3c0419cbf71b49af4d978d4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a9d70b10b3c0419cbf71b49af4d978d4 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/517d6f3217634036991583e3de4a126e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/517d6f3217634036991583e3de4a126e 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/8f94e3f0b30843678b76e862cc123a61 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/8f94e3f0b30843678b76e862cc123a61 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/35291f7d3cc54f04b26a78eb6d5e6574 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/35291f7d3cc54f04b26a78eb6d5e6574 2024-12-12T19:33:49,960 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/ee4a0658e31744cf9f976bb4d3a17748 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/ee4a0658e31744cf9f976bb4d3a17748 2024-12-12T19:33:49,961 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/4ac5a5d13cc441fcaa1ca8c42102881b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/4ac5a5d13cc441fcaa1ca8c42102881b 2024-12-12T19:33:49,961 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/5dd2eeb927a9445b9ea1b67d032487ba to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/5dd2eeb927a9445b9ea1b67d032487ba 2024-12-12T19:33:49,962 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1fe00b260c0b4472b1bcf440e93a1d5c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1fe00b260c0b4472b1bcf440e93a1d5c 2024-12-12T19:33:49,963 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/210f2d7a2d6c49f892c515350ee7b5cb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/210f2d7a2d6c49f892c515350ee7b5cb 2024-12-12T19:33:49,964 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/0f9fa9ef08b94e10addf012ba03bbc93 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/0f9fa9ef08b94e10addf012ba03bbc93 2024-12-12T19:33:49,964 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/b1dabc9bd8074bf2b1e32eef3c359e4f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/b1dabc9bd8074bf2b1e32eef3c359e4f 2024-12-12T19:33:49,965 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1f4f905e6d78459c8b46422ede3d7dac to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/1f4f905e6d78459c8b46422ede3d7dac 2024-12-12T19:33:49,965 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a2b6061a5a9540899b030e1cd85f6ad8 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a2b6061a5a9540899b030e1cd85f6ad8 2024-12-12T19:33:49,966 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/92de0add4a444f978a380f4cff195fd4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/92de0add4a444f978a380f4cff195fd4 2024-12-12T19:33:49,967 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a806e65a5a8c4210a483f2b7969f5387 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a806e65a5a8c4210a483f2b7969f5387 2024-12-12T19:33:49,968 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a78c7fe064594e0e8144ebc2bffdafba to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/a78c7fe064594e0e8144ebc2bffdafba 2024-12-12T19:33:49,968 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c9081da0a2f641469b2dfc2e74cc6168 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c9081da0a2f641469b2dfc2e74cc6168 2024-12-12T19:33:49,969 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/40d2177ce49847ea969b3036873ff506 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/40d2177ce49847ea969b3036873ff506 2024-12-12T19:33:49,969 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/39ad6a2800814352bec0d3039172addc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/39ad6a2800814352bec0d3039172addc 2024-12-12T19:33:49,969 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/61b149230c384f969a2468555e01a838 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/61b149230c384f969a2468555e01a838 2024-12-12T19:33:49,969 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/499470307eca4e60988b8230f25b0147 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/499470307eca4e60988b8230f25b0147 2024-12-12T19:33:49,975 DEBUG [StoreCloser-TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/b1c834c713504768ae3713fffc6543c2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63af6d0c4435429ea3234eb2d66b80af, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a941d2773aef4ff299ec62d95b007e29, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2d4dd810efd240d192711b32ba86fba4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/36a2d9ec9ecc4e8790973e3f7166ecaf, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/179263e6543d44fc9fa34a48a6681846, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2028c39bf29c4bf9afee8bb3cc682497, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e5fddaa3e8c94a9b83c5fd7edea86016, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bbce2a69df5141e49b270f693ac03087, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/52a0849f7d334c09b9768f9c069a308f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/4ac00b73ad2249f1ae1999df2ed18d30, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/1b972d13f5974c0f989848b34ded447b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/53eab477efd241a6933f3b6de618d667, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/db5b94e077a340eba52ee3571f17d741, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63b4a497f9aa4b5bafe99db1bf929060, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/0d2052c28fe44b5d97ad2bdd4818697f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a78dbc9348ab4eeda94dc2e88ca5c5a8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/77ecb336a1cc470fb7b2336b3839a636, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/d51aaf20ffac4c56ad86ec1d1837122f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/fb1ceac8c8a94787acdf0e4d6ba95176, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e681a42e32c44e5ab95e3da598a0035b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/27b7f332c003476fa70425cff511f5c7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bc253ce42f2d49ffa7d6ba241b1b9ec4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/ffb3207c11d548a58f4c7b02a968b25f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/836046af34124c9a843f244542562b6a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a70fa653705f43f5a2ff4cbd468147fe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/3dee0dfb8bd0412f99edd8f8ee864141, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/6c61df817409410fa30c1a2451fe70fc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a12176419e9f44109864bcd4151eee1a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/afaf6aecf06348cfbf67a50875c70368] to archive 2024-12-12T19:33:49,977 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:33:49,980 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a941d2773aef4ff299ec62d95b007e29 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a941d2773aef4ff299ec62d95b007e29 2024-12-12T19:33:49,980 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e5fddaa3e8c94a9b83c5fd7edea86016 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e5fddaa3e8c94a9b83c5fd7edea86016 2024-12-12T19:33:49,981 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63af6d0c4435429ea3234eb2d66b80af to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63af6d0c4435429ea3234eb2d66b80af 2024-12-12T19:33:49,981 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/36a2d9ec9ecc4e8790973e3f7166ecaf to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/36a2d9ec9ecc4e8790973e3f7166ecaf 2024-12-12T19:33:49,980 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2028c39bf29c4bf9afee8bb3cc682497 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2028c39bf29c4bf9afee8bb3cc682497 2024-12-12T19:33:49,981 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/179263e6543d44fc9fa34a48a6681846 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/179263e6543d44fc9fa34a48a6681846 2024-12-12T19:33:49,982 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/52a0849f7d334c09b9768f9c069a308f to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/52a0849f7d334c09b9768f9c069a308f 2024-12-12T19:33:49,982 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/4ac00b73ad2249f1ae1999df2ed18d30 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/4ac00b73ad2249f1ae1999df2ed18d30 2024-12-12T19:33:49,982 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bbce2a69df5141e49b270f693ac03087 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bbce2a69df5141e49b270f693ac03087 2024-12-12T19:33:49,983 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2d4dd810efd240d192711b32ba86fba4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/2d4dd810efd240d192711b32ba86fba4 2024-12-12T19:33:49,983 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/1b972d13f5974c0f989848b34ded447b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/1b972d13f5974c0f989848b34ded447b 2024-12-12T19:33:49,984 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/db5b94e077a340eba52ee3571f17d741 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/db5b94e077a340eba52ee3571f17d741 2024-12-12T19:33:49,984 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63b4a497f9aa4b5bafe99db1bf929060 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/63b4a497f9aa4b5bafe99db1bf929060 2024-12-12T19:33:49,984 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/0d2052c28fe44b5d97ad2bdd4818697f to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/0d2052c28fe44b5d97ad2bdd4818697f 2024-12-12T19:33:49,985 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/b1c834c713504768ae3713fffc6543c2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/b1c834c713504768ae3713fffc6543c2 2024-12-12T19:33:49,985 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a78dbc9348ab4eeda94dc2e88ca5c5a8 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a78dbc9348ab4eeda94dc2e88ca5c5a8 2024-12-12T19:33:49,985 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/77ecb336a1cc470fb7b2336b3839a636 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/77ecb336a1cc470fb7b2336b3839a636 2024-12-12T19:33:49,987 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/53eab477efd241a6933f3b6de618d667 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/53eab477efd241a6933f3b6de618d667 2024-12-12T19:33:49,988 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/27b7f332c003476fa70425cff511f5c7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/27b7f332c003476fa70425cff511f5c7 2024-12-12T19:33:49,988 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/d51aaf20ffac4c56ad86ec1d1837122f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/d51aaf20ffac4c56ad86ec1d1837122f 2024-12-12T19:33:49,988 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/fb1ceac8c8a94787acdf0e4d6ba95176 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/fb1ceac8c8a94787acdf0e4d6ba95176 2024-12-12T19:33:49,990 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/ffb3207c11d548a58f4c7b02a968b25f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/ffb3207c11d548a58f4c7b02a968b25f 2024-12-12T19:33:49,990 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bc253ce42f2d49ffa7d6ba241b1b9ec4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/bc253ce42f2d49ffa7d6ba241b1b9ec4 2024-12-12T19:33:49,991 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e681a42e32c44e5ab95e3da598a0035b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/e681a42e32c44e5ab95e3da598a0035b 2024-12-12T19:33:49,991 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a70fa653705f43f5a2ff4cbd468147fe to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a70fa653705f43f5a2ff4cbd468147fe 2024-12-12T19:33:49,991 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/836046af34124c9a843f244542562b6a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/836046af34124c9a843f244542562b6a 2024-12-12T19:33:49,992 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/3dee0dfb8bd0412f99edd8f8ee864141 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/3dee0dfb8bd0412f99edd8f8ee864141 2024-12-12T19:33:49,992 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/afaf6aecf06348cfbf67a50875c70368 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/afaf6aecf06348cfbf67a50875c70368 2024-12-12T19:33:49,992 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/6c61df817409410fa30c1a2451fe70fc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/6c61df817409410fa30c1a2451fe70fc 2024-12-12T19:33:49,992 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a12176419e9f44109864bcd4151eee1a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/a12176419e9f44109864bcd4151eee1a 2024-12-12T19:33:50,002 DEBUG [StoreCloser-TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d45437a1c9534364861fa7c63198c103, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4c78d8ff4f4a43119c143afa2f882113, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/47f042d63e094abbbb71f22a06d3cc0a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/dcabf9a5ac2343d084247560bbc7c912, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/fe7463bc754748b68f404f135126b77f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/51f9a9d146584eb6bdead0fc65cce2a6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/514835e4c7ed44f8b024994eabe3bb5d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/a8e3670f80344ca78d9debc992832a60, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/29d4b5ebc3b645beb8d5be933b304ae2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/783c8db842a54bf68704e9ccf42e57b9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/b5bc0dd3c40d4112ad488641e05953e9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9972ff0eff4d45879c7585128736b556, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/74deffd12d3b4f0eaa4c9bb98a76dabe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/3422996b27ec4a648e40c9649b774b6b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/afeaf1a2b8514be984967c5e527074c2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d8224b88f7524cf19a8af5b07e5d3132, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/21372d604a7846998c20ec6e91c21bdc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f0f3b657de25483b8f89a498e5c4aa2e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4150a0647e6243a993a8b6ea3924ad9e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/14b8dbca7ecc4e7f84e2432c732ae00b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/0224bb4081bc4a6f964dbf0e6e10e2c4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9aadc8e08c47468f9e642b60618c637e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/50d9d15a2620464fa8cb1b8aa10e44b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9a248cbf9e4947df9b6607053b273594, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/8fc1cffc43d94523913c73711cd26bf0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ab3a26d817e6434eab7d578fbfb16485, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f4d852b2d5f74364887ebe532653d944, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/10f1d59ddb3642e2a54caa2b8ed50500, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/95e1e3e2a66e413082bf4c5aa1d7832d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/e64682114c6f430282062bb0f6b2fc64] to archive 2024-12-12T19:33:50,003 DEBUG [StoreCloser-TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T19:33:50,009 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4c78d8ff4f4a43119c143afa2f882113 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4c78d8ff4f4a43119c143afa2f882113 2024-12-12T19:33:50,011 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/fe7463bc754748b68f404f135126b77f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/fe7463bc754748b68f404f135126b77f 2024-12-12T19:33:50,011 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/dcabf9a5ac2343d084247560bbc7c912 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/dcabf9a5ac2343d084247560bbc7c912 2024-12-12T19:33:50,012 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d45437a1c9534364861fa7c63198c103 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d45437a1c9534364861fa7c63198c103 2024-12-12T19:33:50,012 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/a8e3670f80344ca78d9debc992832a60 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/a8e3670f80344ca78d9debc992832a60 2024-12-12T19:33:50,012 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/514835e4c7ed44f8b024994eabe3bb5d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/514835e4c7ed44f8b024994eabe3bb5d 2024-12-12T19:33:50,012 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/51f9a9d146584eb6bdead0fc65cce2a6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/51f9a9d146584eb6bdead0fc65cce2a6 2024-12-12T19:33:50,013 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/47f042d63e094abbbb71f22a06d3cc0a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/47f042d63e094abbbb71f22a06d3cc0a 2024-12-12T19:33:50,013 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/29d4b5ebc3b645beb8d5be933b304ae2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/29d4b5ebc3b645beb8d5be933b304ae2 2024-12-12T19:33:50,014 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9972ff0eff4d45879c7585128736b556 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9972ff0eff4d45879c7585128736b556 2024-12-12T19:33:50,014 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/afeaf1a2b8514be984967c5e527074c2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/afeaf1a2b8514be984967c5e527074c2 2024-12-12T19:33:50,015 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/3422996b27ec4a648e40c9649b774b6b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/3422996b27ec4a648e40c9649b774b6b 2024-12-12T19:33:50,015 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/74deffd12d3b4f0eaa4c9bb98a76dabe to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/74deffd12d3b4f0eaa4c9bb98a76dabe 2024-12-12T19:33:50,015 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/783c8db842a54bf68704e9ccf42e57b9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/783c8db842a54bf68704e9ccf42e57b9 2024-12-12T19:33:50,015 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/21372d604a7846998c20ec6e91c21bdc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/21372d604a7846998c20ec6e91c21bdc 2024-12-12T19:33:50,016 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f0f3b657de25483b8f89a498e5c4aa2e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f0f3b657de25483b8f89a498e5c4aa2e 2024-12-12T19:33:50,016 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d8224b88f7524cf19a8af5b07e5d3132 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/d8224b88f7524cf19a8af5b07e5d3132 2024-12-12T19:33:50,017 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/14b8dbca7ecc4e7f84e2432c732ae00b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/14b8dbca7ecc4e7f84e2432c732ae00b 2024-12-12T19:33:50,017 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/50d9d15a2620464fa8cb1b8aa10e44b0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/50d9d15a2620464fa8cb1b8aa10e44b0 2024-12-12T19:33:50,017 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9aadc8e08c47468f9e642b60618c637e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9aadc8e08c47468f9e642b60618c637e 2024-12-12T19:33:50,017 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4150a0647e6243a993a8b6ea3924ad9e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/4150a0647e6243a993a8b6ea3924ad9e 2024-12-12T19:33:50,018 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/b5bc0dd3c40d4112ad488641e05953e9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/b5bc0dd3c40d4112ad488641e05953e9 2024-12-12T19:33:50,022 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/0224bb4081bc4a6f964dbf0e6e10e2c4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/0224bb4081bc4a6f964dbf0e6e10e2c4 2024-12-12T19:33:50,022 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/8fc1cffc43d94523913c73711cd26bf0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/8fc1cffc43d94523913c73711cd26bf0 2024-12-12T19:33:50,023 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f4d852b2d5f74364887ebe532653d944 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/f4d852b2d5f74364887ebe532653d944 2024-12-12T19:33:50,023 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9a248cbf9e4947df9b6607053b273594 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/9a248cbf9e4947df9b6607053b273594 2024-12-12T19:33:50,023 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/10f1d59ddb3642e2a54caa2b8ed50500 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/10f1d59ddb3642e2a54caa2b8ed50500 2024-12-12T19:33:50,023 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/95e1e3e2a66e413082bf4c5aa1d7832d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/95e1e3e2a66e413082bf4c5aa1d7832d 2024-12-12T19:33:50,023 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ab3a26d817e6434eab7d578fbfb16485 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ab3a26d817e6434eab7d578fbfb16485 2024-12-12T19:33:50,029 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/e64682114c6f430282062bb0f6b2fc64 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/e64682114c6f430282062bb0f6b2fc64 2024-12-12T19:33:50,045 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/recovered.edits/442.seqid, newMaxSeqId=442, maxSeqId=1 2024-12-12T19:33:50,046 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107. 2024-12-12T19:33:50,046 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1635): Region close journal for f61d5e1e16d46799d7435009ba841107: 2024-12-12T19:33:50,047 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(170): Closed f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:50,047 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=f61d5e1e16d46799d7435009ba841107, regionState=CLOSED 2024-12-12T19:33:50,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-12T19:33:50,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseRegionProcedure f61d5e1e16d46799d7435009ba841107, server=4c9c438b6eeb,42689,1734031923038 in 1.3180 sec 2024-12-12T19:33:50,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=82 2024-12-12T19:33:50,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=82, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f61d5e1e16d46799d7435009ba841107, UNASSIGN in 1.3340 sec 2024-12-12T19:33:50,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-12T19:33:50,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.3380 sec 2024-12-12T19:33:50,055 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032030054"}]},"ts":"1734032030054"} 2024-12-12T19:33:50,056 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T19:33:50,079 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-12T19:33:50,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4240 sec 2024-12-12T19:33:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T19:33:50,787 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-12T19:33:50,787 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T19:33:50,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:50,789 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=85, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:50,790 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=85, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T19:33:50,799 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:50,815 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/recovered.edits] 2024-12-12T19:33:50,848 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c6f4d39a5c044f00b484b2991c9dfb70 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/c6f4d39a5c044f00b484b2991c9dfb70 2024-12-12T19:33:50,848 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7d04dcaa707549aaa2a63517e4d6d94e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7d04dcaa707549aaa2a63517e4d6d94e 2024-12-12T19:33:50,849 
DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7bc2f2cdcb884b3187474f5172bb945a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/A/7bc2f2cdcb884b3187474f5172bb945a 2024-12-12T19:33:50,877 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/f96b815852264a6f9f431a0f5c7f40f0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/f96b815852264a6f9f431a0f5c7f40f0 2024-12-12T19:33:50,877 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/5ac56b9aeb164aa0b0856de903f1c5ca to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/5ac56b9aeb164aa0b0856de903f1c5ca 2024-12-12T19:33:50,879 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/c92e586d3dff47c9971f909de256b6a6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/B/c92e586d3dff47c9971f909de256b6a6 2024-12-12T19:33:50,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T19:33:50,903 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/7539f536dca84d10b5df42de2363f339 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/7539f536dca84d10b5df42de2363f339 2024-12-12T19:33:50,904 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/39c07f48b7a741338228959524ff48e5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/39c07f48b7a741338228959524ff48e5 2024-12-12T19:33:50,904 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ac3d65fcf4834ae4a5762072b56c86c9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/C/ac3d65fcf4834ae4a5762072b56c86c9 
2024-12-12T19:33:50,927 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/recovered.edits/442.seqid to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107/recovered.edits/442.seqid 2024-12-12T19:33:50,936 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/f61d5e1e16d46799d7435009ba841107 2024-12-12T19:33:50,939 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T19:33:50,955 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=85, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:50,958 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T19:33:50,975 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T19:33:50,977 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=85, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:50,977 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T19:33:50,977 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734032030977"}]},"ts":"9223372036854775807"} 2024-12-12T19:33:50,985 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T19:33:50,985 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f61d5e1e16d46799d7435009ba841107, NAME => 'TestAcidGuarantees,,1734031998020.f61d5e1e16d46799d7435009ba841107.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T19:33:50,986 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-12T19:33:50,986 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734032030986"}]},"ts":"9223372036854775807"} 2024-12-12T19:33:51,003 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T19:33:51,034 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=85, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:51,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 247 msec 2024-12-12T19:33:51,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T19:33:51,094 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-12T19:33:51,109 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=243 (was 243), OpenFileDescriptor=451 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1421 (was 1471), ProcessCount=11 (was 11), AvailableMemoryMB=8510 (was 6225) - AvailableMemoryMB LEAK? - 2024-12-12T19:33:51,124 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=243, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=1421, ProcessCount=11, AvailableMemoryMB=8508 2024-12-12T19:33:51,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T19:33:51,132 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:33:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:51,144 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:33:51,144 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:51,147 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:33:51,149 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 86 2024-12-12T19:33:51,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T19:33:51,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742116_1292 (size=963) 2024-12-12T19:33:51,192 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:33:51,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742117_1293 (size=53) 2024-12-12T19:33:51,242 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:33:51,242 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a329d898f2cbc923ae8747673ded3106, disabling compactions & flushes 2024-12-12T19:33:51,242 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:51,243 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:51,243 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. after waiting 0 ms 2024-12-12T19:33:51,243 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:51,243 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:51,243 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:51,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:33:51,248 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734032031248"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734032031248"}]},"ts":"1734032031248"} 2024-12-12T19:33:51,263 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-12T19:33:51,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T19:33:51,273 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:33:51,274 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032031274"}]},"ts":"1734032031274"} 2024-12-12T19:33:51,278 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T19:33:51,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, ASSIGN}] 2024-12-12T19:33:51,366 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, ASSIGN 2024-12-12T19:33:51,375 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:33:51,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T19:33:51,531 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:51,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; OpenRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:33:51,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:51,726 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:51,726 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(7285): Opening region: {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:33:51,726 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,727 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:33:51,727 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(7327): checking encryption for a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,727 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(7330): checking classloading for a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,733 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,746 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:51,747 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a329d898f2cbc923ae8747673ded3106 columnFamilyName A 2024-12-12T19:33:51,747 DEBUG [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:51,758 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(327): Store=a329d898f2cbc923ae8747673ded3106/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:51,759 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T19:33:51,783 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:51,784 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a329d898f2cbc923ae8747673ded3106 columnFamilyName B 2024-12-12T19:33:51,784 DEBUG [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:51,790 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(327): Store=a329d898f2cbc923ae8747673ded3106/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:51,791 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,800 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:51,800 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a329d898f2cbc923ae8747673ded3106 columnFamilyName C 2024-12-12T19:33:51,800 DEBUG [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:33:51,807 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(327): Store=a329d898f2cbc923ae8747673ded3106/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:51,808 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:51,812 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,815 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,835 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:33:51,851 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1085): writing seq id for a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:51,886 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:33:51,887 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1102): Opened a329d898f2cbc923ae8747673ded3106; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59301489, jitterRate=-0.11633895337581635}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:33:51,888 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1001): Region open journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:51,890 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., pid=88, masterSystemTime=1734032031722 2024-12-12T19:33:51,895 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:51,895 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:51,896 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:51,903 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-12T19:33:51,903 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; OpenRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 in 344 msec 2024-12-12T19:33:51,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-12T19:33:51,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, ASSIGN in 542 msec 2024-12-12T19:33:51,907 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:33:51,907 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032031907"}]},"ts":"1734032031907"} 2024-12-12T19:33:51,912 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T19:33:51,976 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:33:51,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 844 msec 2024-12-12T19:33:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T19:33:52,284 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 86 completed 2024-12-12T19:33:52,286 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-12-12T19:33:52,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:52,348 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:52,353 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41044, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:52,368 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T19:33:52,373 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43078, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:33:52,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T19:33:52,389 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:33:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T19:33:52,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742118_1294 (size=999) 2024-12-12T19:33:52,507 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T19:33:52,507 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T19:33:52,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:33:52,520 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, REOPEN/MOVE}] 2024-12-12T19:33:52,527 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, REOPEN/MOVE 2024-12-12T19:33:52,530 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:52,537 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:33:52,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:33:52,695 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:52,700 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:52,700 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:33:52,700 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing a329d898f2cbc923ae8747673ded3106, disabling compactions & flushes 2024-12-12T19:33:52,700 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:52,700 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:52,700 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. after waiting 0 ms 2024-12-12T19:33:52,700 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:52,719 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T19:33:52,720 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:52,720 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:52,720 WARN [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionServer(3786): Not adding moved region record: a329d898f2cbc923ae8747673ded3106 to self. 2024-12-12T19:33:52,727 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:52,731 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=CLOSED 2024-12-12T19:33:52,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-12T19:33:52,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 in 210 msec 2024-12-12T19:33:52,754 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, REOPEN/MOVE; state=CLOSED, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=true 2024-12-12T19:33:52,904 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:52,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE; OpenRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:33:53,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,073 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:53,073 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(7285): Opening region: {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:33:53,074 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,074 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:33:53,074 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(7327): checking encryption for a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,074 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(7330): checking classloading for a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,095 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,100 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:53,100 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a329d898f2cbc923ae8747673ded3106 columnFamilyName A 2024-12-12T19:33:53,109 DEBUG [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:53,115 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(327): Store=a329d898f2cbc923ae8747673ded3106/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:53,116 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,123 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:53,123 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a329d898f2cbc923ae8747673ded3106 columnFamilyName B 2024-12-12T19:33:53,124 DEBUG [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:53,128 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(327): Store=a329d898f2cbc923ae8747673ded3106/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:53,128 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,129 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:33:53,129 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a329d898f2cbc923ae8747673ded3106 columnFamilyName C 2024-12-12T19:33:53,129 DEBUG [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:53,133 INFO [StoreOpener-a329d898f2cbc923ae8747673ded3106-1 {}] regionserver.HStore(327): Store=a329d898f2cbc923ae8747673ded3106/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:33:53,133 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:53,135 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,136 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,142 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:33:53,147 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1085): writing seq id for a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,150 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1102): Opened a329d898f2cbc923ae8747673ded3106; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62236964, jitterRate=-0.0725969672203064}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:33:53,150 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1001): Region open journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:53,151 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., pid=93, masterSystemTime=1734032033058 2024-12-12T19:33:53,154 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:53,154 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:53,156 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=OPEN, openSeqNum=5, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=91 2024-12-12T19:33:53,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=91, state=SUCCESS; OpenRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 in 253 msec 2024-12-12T19:33:53,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-12T19:33:53,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, REOPEN/MOVE in 647 msec 2024-12-12T19:33:53,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-12T19:33:53,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 658 msec 2024-12-12T19:33:53,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 784 msec 2024-12-12T19:33:53,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T19:33:53,181 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-12-12T19:33:53,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,257 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-12-12T19:33:53,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,312 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-12-12T19:33:53,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,358 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 
127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-12-12T19:33:53,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,384 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-12-12T19:33:53,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,429 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68035c67 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@627cad17 2024-12-12T19:33:53,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a637ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,455 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3eab689a to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39387e4d 2024-12-12T19:33:53,499 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa53591, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,500 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x59bd764a to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@238db126 2024-12-12T19:33:53,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3512017b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,554 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-12-12T19:33:53,605 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,606 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-12-12T19:33:53,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:33:53,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:53,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees 2024-12-12T19:33:53,645 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:53,646 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:53,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:53,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T19:33:53,673 DEBUG [hconnection-0x5ffea27b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,674 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,675 DEBUG [hconnection-0x28a8976e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,676 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,679 DEBUG [hconnection-0xcb56e28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,680 DEBUG [hconnection-0x7bba170f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,680 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,681 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,694 DEBUG [hconnection-0x70610060-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,697 DEBUG [hconnection-0x48d6eb2b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,697 DEBUG [hconnection-0x335e94e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,697 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,700 DEBUG [hconnection-0x1f6c39f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,700 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,703 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41128, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,704 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,704 DEBUG [hconnection-0x79f32bbe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,708 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,709 DEBUG [hconnection-0x7335ce06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:33:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:33:53,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:53,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:53,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:53,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:53,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:53,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:53,711 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:33:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 
2024-12-12T19:33:53,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032093761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032093772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032093773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032093776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032093776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125a6f2e184a3641319cf74be8d0d9381a_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032033708/Put/seqid=0 2024-12-12T19:33:53,798 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T19:33:53,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:53,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:53,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:53,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:53,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:53,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:53,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742119_1295 (size=12154) 2024-12-12T19:33:53,856 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:53,861 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125a6f2e184a3641319cf74be8d0d9381a_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125a6f2e184a3641319cf74be8d0d9381a_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:53,862 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0280754184d94d21bba84e675e1b1c6f, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:53,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0280754184d94d21bba84e675e1b1c6f is 175, key is test_row_0/A:col10/1734032033708/Put/seqid=0 2024-12-12T19:33:53,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742120_1296 (size=30955) 2024-12-12T19:33:53,871 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0280754184d94d21bba84e675e1b1c6f 2024-12-12T19:33:53,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032093877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032093877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032093877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032093880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:53,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032093892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/9c77965a306b4986a777b61157371cfe is 50, key is test_row_0/B:col10/1734032033708/Put/seqid=0 2024-12-12T19:33:53,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T19:33:53,954 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:53,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T19:33:53,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:53,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:53,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:53,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:53,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:53,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:53,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742121_1297 (size=12001) 2024-12-12T19:33:54,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/9c77965a306b4986a777b61157371cfe 2024-12-12T19:33:54,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/82f419259bf24f4597c54a058ae55087 is 50, key is test_row_0/C:col10/1734032033708/Put/seqid=0 2024-12-12T19:33:54,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032094087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032094092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032094092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742122_1298 (size=12001) 2024-12-12T19:33:54,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032094092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/82f419259bf24f4597c54a058ae55087 2024-12-12T19:33:54,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032094097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0280754184d94d21bba84e675e1b1c6f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f 2024-12-12T19:33:54,110 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T19:33:54,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:54,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:54,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:54,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:54,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:54,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f, entries=150, sequenceid=17, filesize=30.2 K 2024-12-12T19:33:54,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:54,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/9c77965a306b4986a777b61157371cfe as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/9c77965a306b4986a777b61157371cfe 2024-12-12T19:33:54,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/9c77965a306b4986a777b61157371cfe, entries=150, sequenceid=17, filesize=11.7 K 2024-12-12T19:33:54,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/82f419259bf24f4597c54a058ae55087 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/82f419259bf24f4597c54a058ae55087 2024-12-12T19:33:54,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/82f419259bf24f4597c54a058ae55087, entries=150, sequenceid=17, filesize=11.7 K 2024-12-12T19:33:54,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for a329d898f2cbc923ae8747673ded3106 in 454ms, sequenceid=17, compaction requested=false 2024-12-12T19:33:54,163 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:54,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T19:33:54,275 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T19:33:54,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:54,279 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:33:54,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:54,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:54,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:54,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:54,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:54,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:54,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121236d1d52fd13f44c3afb5883e5a4807cf_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032033762/Put/seqid=0 2024-12-12T19:33:54,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742123_1299 (size=12154) 2024-12-12T19:33:54,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:54,340 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121236d1d52fd13f44c3afb5883e5a4807cf_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121236d1d52fd13f44c3afb5883e5a4807cf_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:54,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/5f1bff8f59584e8fb8ae0ca205ddf40a, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:54,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/5f1bff8f59584e8fb8ae0ca205ddf40a is 175, key is test_row_0/A:col10/1734032033762/Put/seqid=0 2024-12-12T19:33:54,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742124_1300 (size=30955) 2024-12-12T19:33:54,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:54,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:54,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032094407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032094415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032094420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032094422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032094424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032094531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032094532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032094532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032094532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032094532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032094748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032094748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032094748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T19:33:54,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032094752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:54,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032094755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:54,779 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/5f1bff8f59584e8fb8ae0ca205ddf40a 2024-12-12T19:33:54,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/54a01e7ea1bd42ed8f86e260afe19ee4 is 50, key is test_row_0/B:col10/1734032033762/Put/seqid=0 2024-12-12T19:33:54,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742125_1301 (size=12001) 2024-12-12T19:33:55,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032095056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032095057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032095058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032095061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032095062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,249 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/54a01e7ea1bd42ed8f86e260afe19ee4 2024-12-12T19:33:55,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/685cb7ccb6ed41ed945a90daa81f748f is 50, key is test_row_0/C:col10/1734032033762/Put/seqid=0 2024-12-12T19:33:55,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742126_1302 (size=12001) 2024-12-12T19:33:55,315 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/685cb7ccb6ed41ed945a90daa81f748f 2024-12-12T19:33:55,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/5f1bff8f59584e8fb8ae0ca205ddf40a as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a 2024-12-12T19:33:55,333 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a, entries=150, sequenceid=42, filesize=30.2 K 2024-12-12T19:33:55,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/54a01e7ea1bd42ed8f86e260afe19ee4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/54a01e7ea1bd42ed8f86e260afe19ee4 2024-12-12T19:33:55,340 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/54a01e7ea1bd42ed8f86e260afe19ee4, entries=150, sequenceid=42, filesize=11.7 K 2024-12-12T19:33:55,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/685cb7ccb6ed41ed945a90daa81f748f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/685cb7ccb6ed41ed945a90daa81f748f 2024-12-12T19:33:55,355 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/685cb7ccb6ed41ed945a90daa81f748f, entries=150, sequenceid=42, filesize=11.7 K 2024-12-12T19:33:55,357 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a329d898f2cbc923ae8747673ded3106 in 1078ms, sequenceid=42, compaction requested=false 2024-12-12T19:33:55,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:55,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:55,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=95 2024-12-12T19:33:55,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=95 2024-12-12T19:33:55,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-12T19:33:55,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7140 sec 2024-12-12T19:33:55,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees in 1.7230 sec 2024-12-12T19:33:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:55,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:55,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121279e4cbeea45842c08413d39c0847692d_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:55,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742127_1303 (size=12154) 2024-12-12T19:33:55,620 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T19:33:55,623 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:55,633 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121279e4cbeea45842c08413d39c0847692d_a329d898f2cbc923ae8747673ded3106 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121279e4cbeea45842c08413d39c0847692d_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:55,636 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0701f7a1f360448f9a26b7efe485718e, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:55,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0701f7a1f360448f9a26b7efe485718e is 175, key is test_row_0/A:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:55,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742128_1304 (size=30955) 2024-12-12T19:33:55,647 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0701f7a1f360448f9a26b7efe485718e 2024-12-12T19:33:55,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032095632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032095633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032095644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032095652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032095655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/eeb97d9270d5490685fafdb2d7e92750 is 50, key is test_row_0/B:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:55,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742129_1305 (size=12001) 2024-12-12T19:33:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T19:33:55,756 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-12T19:33:55,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032095753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,757 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-12-12T19:33:55,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032095757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,764 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:55,766 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:55,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:55,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032095765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032095770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032095775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:55,918 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:55,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:55,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:55,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:55,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:55,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:55,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:55,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032095960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032095968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032095989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:55,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:55,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032095990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032095996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:56,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:56,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:56,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:33:56,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/eeb97d9270d5490685fafdb2d7e92750 2024-12-12T19:33:56,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/fa6535574f1e4fc3babf9237c596084f is 50, key is test_row_0/C:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:56,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742130_1306 (size=12001) 2024-12-12T19:33:56,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/fa6535574f1e4fc3babf9237c596084f 2024-12-12T19:33:56,242 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:56,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:56,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0701f7a1f360448f9a26b7efe485718e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e 2024-12-12T19:33:56,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e, entries=150, sequenceid=55, filesize=30.2 K 2024-12-12T19:33:56,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032096271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032096274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/eeb97d9270d5490685fafdb2d7e92750 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/eeb97d9270d5490685fafdb2d7e92750 2024-12-12T19:33:56,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/eeb97d9270d5490685fafdb2d7e92750, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T19:33:56,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/fa6535574f1e4fc3babf9237c596084f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fa6535574f1e4fc3babf9237c596084f 2024-12-12T19:33:56,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fa6535574f1e4fc3babf9237c596084f, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T19:33:56,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a329d898f2cbc923ae8747673ded3106 in 728ms, sequenceid=55, compaction requested=true 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:56,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:33:56,296 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:56,298 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:56,304 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:56,304 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:33:56,304 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,304 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=90.7 K 2024-12-12T19:33:56,304 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,304 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e] 2024-12-12T19:33:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:56,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:33:56,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:56,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:56,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:56,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:56,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:56,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:56,306 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:56,306 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:33:56,306 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0280754184d94d21bba84e675e1b1c6f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734032033704 2024-12-12T19:33:56,306 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
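    The compaction-selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy then picking all three files for a minor compaction) are governed by the store's compaction thresholds. As a minimal sketch only, the standard configuration keys that appear to be in play are shown below with their shipped defaults, which line up with the numbers in the log; the values TestAcidGuarantees actually uses are not visible here and may differ.

    // Sketch of the standard HBase compaction knobs matching the log output.
    // Key names are real HBase properties; the values are the defaults, shown
    // only because they agree with "3 eligible, 16 blocking" above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration sketch() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum store files before a minor compaction is considered;
        // the log shows exactly 3 files being selected per store.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files compacted in one pass.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // "16 blocking" in the selection message corresponds to this limit;
        // writes are delayed once a store accumulates this many files.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }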
2024-12-12T19:33:56,306 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/82f419259bf24f4597c54a058ae55087, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/685cb7ccb6ed41ed945a90daa81f748f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fa6535574f1e4fc3babf9237c596084f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=35.2 K 2024-12-12T19:33:56,307 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f1bff8f59584e8fb8ae0ca205ddf40a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1734032033756 2024-12-12T19:33:56,307 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82f419259bf24f4597c54a058ae55087, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734032033704 2024-12-12T19:33:56,307 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0701f7a1f360448f9a26b7efe485718e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032034407 2024-12-12T19:33:56,307 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 685cb7ccb6ed41ed945a90daa81f748f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1734032033756 2024-12-12T19:33:56,308 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa6535574f1e4fc3babf9237c596084f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032034407 2024-12-12T19:33:56,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212669f719f868d4c8baea1ba71942b1fae_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032035640/Put/seqid=0 2024-12-12T19:33:56,354 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:56,355 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/907624b7a69d40f59d066f25f7090270 is 50, key is test_row_0/C:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:56,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742131_1307 (size=14594) 2024-12-12T19:33:56,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032096353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032096355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,371 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:56,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:56,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032096354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,394 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:56,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:56,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
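    The recurring RegionTooBusyException entries ("Over memstore limit=512.0 K") indicate that the region's memstore has reached its blocking threshold, which HBase derives from the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests the test is running with a deliberately small flush size so that writers hit the limit while the flush and compactions above are still in progress. The regular HBase client normally retries this exception on its own, so these warnings are expected back-pressure rather than lost writes. Purely as an illustration of what the exception means for a writer, the hypothetical sketch below issues a put with a small bounded retry; the row, family, and qualifier mirror the keys visible in the log, and nothing here is code from the test itself.

    // Hypothetical client-side sketch: tolerate transient RegionTooBusyException
    // with a bounded retry. The stock HBase client already retries internally;
    // this only illustrates the back-pressure seen in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);                 // may be rejected while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              if (++attempts >= 5) throw e;   // give up after a few tries
              Thread.sleep(100L * attempts);  // back off while the flush drains the memstore
            }
          }
        }
      }
    }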
2024-12-12T19:33:56,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,415 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212c869cbb8be0545cba3c9bdc326910e64_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:56,417 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212c869cbb8be0545cba3c9bdc326910e64_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:56,417 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c869cbb8be0545cba3c9bdc326910e64_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:56,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742132_1308 (size=12104) 2024-12-12T19:33:56,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742133_1309 (size=4469) 2024-12-12T19:33:56,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032096474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032096475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,493 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#260 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:56,493 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/70cfedab7dac4a74afa7c4df5598ed90 is 175, key is test_row_0/A:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:56,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032096491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742134_1310 (size=31058) 2024-12-12T19:33:56,561 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:56,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
as already flushing 2024-12-12T19:33:56,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032096683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032096690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032096724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,759 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:56,785 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212669f719f868d4c8baea1ba71942b1fae_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212669f719f868d4c8baea1ba71942b1fae_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:56,791 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/7cb136ea601d4cfd8f6dd032a11096a9, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:56,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/7cb136ea601d4cfd8f6dd032a11096a9 is 175, key is test_row_0/A:col10/1734032035640/Put/seqid=0 2024-12-12T19:33:56,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032096785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:56,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032096788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742135_1311 (size=39549) 2024-12-12T19:33:56,860 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/7cb136ea601d4cfd8f6dd032a11096a9 2024-12-12T19:33:56,872 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/907624b7a69d40f59d066f25f7090270 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/907624b7a69d40f59d066f25f7090270 2024-12-12T19:33:56,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:56,892 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:56,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:56,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:56,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:56,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:56,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:56,905 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into 907624b7a69d40f59d066f25f7090270(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:56,905 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:56,905 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=13, startTime=1734032036295; duration=0sec 2024-12-12T19:33:56,906 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:56,906 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:33:56,906 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:33:56,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/bc203251dad94e84baeb1550d03a085e is 50, key is test_row_0/B:col10/1734032035640/Put/seqid=0 2024-12-12T19:33:56,928 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:33:56,928 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:33:56,928 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:56,928 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/9c77965a306b4986a777b61157371cfe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/54a01e7ea1bd42ed8f86e260afe19ee4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/eeb97d9270d5490685fafdb2d7e92750] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=35.2 K 2024-12-12T19:33:56,931 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c77965a306b4986a777b61157371cfe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734032033704 2024-12-12T19:33:56,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742136_1312 (size=12001) 2024-12-12T19:33:56,937 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54a01e7ea1bd42ed8f86e260afe19ee4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1734032033756 2024-12-12T19:33:56,938 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting eeb97d9270d5490685fafdb2d7e92750, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032034407 2024-12-12T19:33:56,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/bc203251dad94e84baeb1550d03a085e 2024-12-12T19:33:56,960 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/70cfedab7dac4a74afa7c4df5598ed90 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/70cfedab7dac4a74afa7c4df5598ed90 2024-12-12T19:33:56,962 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#262 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:56,963 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/6b5c96579e9f4b92b14ac86fecc733dc is 50, key is test_row_0/B:col10/1734032034407/Put/seqid=0 2024-12-12T19:33:56,983 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 70cfedab7dac4a74afa7c4df5598ed90(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:56,983 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:56,983 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=13, startTime=1734032036295; duration=0sec 2024-12-12T19:33:56,983 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:56,983 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:33:57,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032096993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032096996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032097037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742137_1313 (size=12104) 2024-12-12T19:33:57,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:57,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:57,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:57,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:57,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:57,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:57,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/acc46ddabae94da7baa7f4de17a38794 is 50, key is test_row_0/C:col10/1734032035640/Put/seqid=0 2024-12-12T19:33:57,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:57,073 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/6b5c96579e9f4b92b14ac86fecc733dc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/6b5c96579e9f4b92b14ac86fecc733dc 2024-12-12T19:33:57,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742138_1314 (size=12001) 2024-12-12T19:33:57,084 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into 6b5c96579e9f4b92b14ac86fecc733dc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:33:57,084 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:57,084 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=13, startTime=1734032036295; duration=0sec 2024-12-12T19:33:57,084 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:57,084 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:33:57,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/acc46ddabae94da7baa7f4de17a38794 2024-12-12T19:33:57,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/7cb136ea601d4cfd8f6dd032a11096a9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9 2024-12-12T19:33:57,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9, entries=200, sequenceid=79, filesize=38.6 K 2024-12-12T19:33:57,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/bc203251dad94e84baeb1550d03a085e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/bc203251dad94e84baeb1550d03a085e 2024-12-12T19:33:57,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:57,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:57,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
as already flushing 2024-12-12T19:33:57,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:57,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:57,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:57,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:33:57,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/bc203251dad94e84baeb1550d03a085e, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T19:33:57,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/acc46ddabae94da7baa7f4de17a38794 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/acc46ddabae94da7baa7f4de17a38794 2024-12-12T19:33:57,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/acc46ddabae94da7baa7f4de17a38794, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T19:33:57,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a329d898f2cbc923ae8747673ded3106 in 963ms, sequenceid=79, compaction requested=false 2024-12-12T19:33:57,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:57,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-12T19:33:57,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:57,362 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:33:57,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:57,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:57,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212191c276daaff47c1a2004bb3873309a8_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032036353/Put/seqid=0 2024-12-12T19:33:57,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742139_1315 (size=12154) 2024-12-12T19:33:57,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:57,503 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212191c276daaff47c1a2004bb3873309a8_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212191c276daaff47c1a2004bb3873309a8_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:57,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/685f79261aeb4ee5af42be2897faf325, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:57,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/685f79261aeb4ee5af42be2897faf325 is 175, key is test_row_0/A:col10/1734032036353/Put/seqid=0 2024-12-12T19:33:57,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:33:57,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:57,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742140_1316 (size=30955) 2024-12-12T19:33:57,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032097668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032097670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032097680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032097782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032097790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032097799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032097806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:57,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032097835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:57,956 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/685f79261aeb4ee5af42be2897faf325 2024-12-12T19:33:57,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/5af54c10ffdb4b26b0d827b86e60df18 is 50, key is test_row_0/B:col10/1734032036353/Put/seqid=0 2024-12-12T19:33:58,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032098001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032097999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032098014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742141_1317 (size=12001) 2024-12-12T19:33:58,059 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/5af54c10ffdb4b26b0d827b86e60df18 2024-12-12T19:33:58,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3de39448bda949ecbe5499786653a5e6 is 50, key is test_row_0/C:col10/1734032036353/Put/seqid=0 2024-12-12T19:33:58,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742142_1318 (size=12001) 2024-12-12T19:33:58,204 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3de39448bda949ecbe5499786653a5e6 2024-12-12T19:33:58,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/685f79261aeb4ee5af42be2897faf325 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325 2024-12-12T19:33:58,271 INFO 
[RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325, entries=150, sequenceid=94, filesize=30.2 K 2024-12-12T19:33:58,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/5af54c10ffdb4b26b0d827b86e60df18 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5af54c10ffdb4b26b0d827b86e60df18 2024-12-12T19:33:58,291 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5af54c10ffdb4b26b0d827b86e60df18, entries=150, sequenceid=94, filesize=11.7 K 2024-12-12T19:33:58,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3de39448bda949ecbe5499786653a5e6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3de39448bda949ecbe5499786653a5e6 2024-12-12T19:33:58,316 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3de39448bda949ecbe5499786653a5e6, entries=150, sequenceid=94, filesize=11.7 K 2024-12-12T19:33:58,324 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a329d898f2cbc923ae8747673ded3106 in 962ms, sequenceid=94, compaction requested=true 2024-12-12T19:33:58,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:58,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:58,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-12-12T19:33:58,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-12-12T19:33:58,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:58,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:33:58,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:58,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:58,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:58,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:58,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:58,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:58,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-12T19:33:58,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5590 sec 2024-12-12T19:33:58,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 2.5710 sec 2024-12-12T19:33:58,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cca452b599ec4432aaf209dcac24c3d8_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:58,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742143_1319 (size=14594) 2024-12-12T19:33:58,407 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:58,423 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cca452b599ec4432aaf209dcac24c3d8_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cca452b599ec4432aaf209dcac24c3d8_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:58,423 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032098398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,424 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/9b16dc5bea1b4ae38e0d1ab295fce839, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:58,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/9b16dc5bea1b4ae38e0d1ab295fce839 is 175, key is test_row_0/A:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:58,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032098410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032098411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742144_1320 (size=39549) 2024-12-12T19:33:58,475 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/9b16dc5bea1b4ae38e0d1ab295fce839 2024-12-12T19:33:58,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4bf2d338be4b490b911db784a43964a0 is 50, key is test_row_0/B:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:58,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032098528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032098536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032098539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742145_1321 (size=12001) 2024-12-12T19:33:58,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4bf2d338be4b490b911db784a43964a0 2024-12-12T19:33:58,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/1e8993cba70e48439ee9df2ed14e5d08 is 50, key is test_row_0/C:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:58,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742146_1322 (size=12001) 2024-12-12T19:33:58,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/1e8993cba70e48439ee9df2ed14e5d08 2024-12-12T19:33:58,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/9b16dc5bea1b4ae38e0d1ab295fce839 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839 2024-12-12T19:33:58,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839, 
entries=200, sequenceid=119, filesize=38.6 K 2024-12-12T19:33:58,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4bf2d338be4b490b911db784a43964a0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4bf2d338be4b490b911db784a43964a0 2024-12-12T19:33:58,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4bf2d338be4b490b911db784a43964a0, entries=150, sequenceid=119, filesize=11.7 K 2024-12-12T19:33:58,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/1e8993cba70e48439ee9df2ed14e5d08 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/1e8993cba70e48439ee9df2ed14e5d08 2024-12-12T19:33:58,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/1e8993cba70e48439ee9df2ed14e5d08, entries=150, sequenceid=119, filesize=11.7 K 2024-12-12T19:33:58,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for a329d898f2cbc923ae8747673ded3106 in 416ms, sequenceid=119, compaction requested=true 2024-12-12T19:33:58,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:58,742 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:33:58,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:33:58,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:58,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:33:58,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:58,742 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:33:58,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:33:58,742 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:58,748 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:33:58,748 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:33:58,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:33:58,748 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:58,748 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/6b5c96579e9f4b92b14ac86fecc733dc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/bc203251dad94e84baeb1550d03a085e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5af54c10ffdb4b26b0d827b86e60df18, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4bf2d338be4b490b911db784a43964a0] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=47.0 K 2024-12-12T19:33:58,749 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141111 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:33:58,749 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:33:58,749 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:33:58,749 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/70cfedab7dac4a74afa7c4df5598ed90, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=137.8 K 2024-12-12T19:33:58,749 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:58,749 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/70cfedab7dac4a74afa7c4df5598ed90, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839] 2024-12-12T19:33:58,752 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b5c96579e9f4b92b14ac86fecc733dc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032034407 2024-12-12T19:33:58,753 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70cfedab7dac4a74afa7c4df5598ed90, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032034407 2024-12-12T19:33:58,753 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bc203251dad94e84baeb1550d03a085e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734032035623 2024-12-12T19:33:58,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:58,755 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cb136ea601d4cfd8f6dd032a11096a9, keycount=200, bloomtype=ROW, 
size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734032035623 2024-12-12T19:33:58,755 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5af54c10ffdb4b26b0d827b86e60df18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734032036337 2024-12-12T19:33:58,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:58,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:58,755 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 685f79261aeb4ee5af42be2897faf325, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734032036337 2024-12-12T19:33:58,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:58,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:58,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:58,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:58,756 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b16dc5bea1b4ae38e0d1ab295fce839, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734032037655 2024-12-12T19:33:58,756 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bf2d338be4b490b911db784a43964a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734032037655 2024-12-12T19:33:58,795 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#271 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:58,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128c3990fda2e84542adbe7f79ceb49ddb_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032038331/Put/seqid=0 2024-12-12T19:33:58,797 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/c22b619d3d344313a61a9ae53c0eb237 is 50, key is test_row_0/B:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:58,800 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:58,812 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212df412aad16ee486d8e1a1d8f516449a7_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:58,817 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212df412aad16ee486d8e1a1d8f516449a7_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:58,817 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212df412aad16ee486d8e1a1d8f516449a7_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:58,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742147_1323 (size=14744) 2024-12-12T19:33:58,834 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:58,849 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128c3990fda2e84542adbe7f79ceb49ddb_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128c3990fda2e84542adbe7f79ceb49ddb_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:58,855 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c8abd332e3314b2483b2243133b882d6, store: [table=TestAcidGuarantees family=A 
region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:58,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c8abd332e3314b2483b2243133b882d6 is 175, key is test_row_0/A:col10/1734032038331/Put/seqid=0 2024-12-12T19:33:58,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742148_1324 (size=12241) 2024-12-12T19:33:58,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742149_1325 (size=4469) 2024-12-12T19:33:58,895 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#272 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:58,896 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/3d85c06973104845b19c306aa44c21df is 175, key is test_row_0/A:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:58,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742150_1326 (size=39699) 2024-12-12T19:33:58,910 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c8abd332e3314b2483b2243133b882d6 2024-12-12T19:33:58,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8b878b5b73f346568ddffae276fb9566 is 50, key is test_row_0/B:col10/1734032038331/Put/seqid=0 2024-12-12T19:33:58,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032098911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032098912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:58,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032098913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:58,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742151_1327 (size=31195) 2024-12-12T19:33:59,004 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/3d85c06973104845b19c306aa44c21df as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/3d85c06973104845b19c306aa44c21df 2024-12-12T19:33:59,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742152_1328 (size=12101) 2024-12-12T19:33:59,032 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 3d85c06973104845b19c306aa44c21df(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
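The repeated RegionTooBusyException warnings in this stretch of the log are the region server refusing new writes while the region's memstore is above its blocking limit (reported here as 512.0 K); the HBase client keeps retrying the put internally, which is why the same stack appears over and over with increasing callIds. A minimal client-side sketch of that pattern is below. It is not part of this test's code: the table name, row, and column are taken from the log, while the retry settings are illustrative assumptions matching the "retries=16" figure reported later by RpcRetryingCallerImpl.

// Hypothetical client-side sketch (assumptions noted above). table.put() is retried
// internally by the HBase client while the server answers RegionTooBusyException;
// the exception only reaches the caller once the retry budget is exhausted.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // retry budget seen in the log
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms (assumed value)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retried internally while the region reports "Over memstore limit".
        table.put(put);
      } catch (IOException e) {
        // Reached only after all retries fail, as in the "Call exception, tries=6" entries later in the log.
        System.err.println("put did not complete: " + e.getMessage());
      }
    }
  }
}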
2024-12-12T19:33:59,032 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:59,033 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=12, startTime=1734032038742; duration=0sec 2024-12-12T19:33:59,033 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:33:59,033 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:33:59,033 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:33:59,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032099034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032099032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032099034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,059 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:33:59,059 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:33:59,059 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:33:59,060 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/907624b7a69d40f59d066f25f7090270, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/acc46ddabae94da7baa7f4de17a38794, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3de39448bda949ecbe5499786653a5e6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/1e8993cba70e48439ee9df2ed14e5d08] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=47.0 K 2024-12-12T19:33:59,063 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 907624b7a69d40f59d066f25f7090270, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032034407 2024-12-12T19:33:59,064 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting acc46ddabae94da7baa7f4de17a38794, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734032035623 2024-12-12T19:33:59,064 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3de39448bda949ecbe5499786653a5e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734032036337 2024-12-12T19:33:59,064 DEBUG 
[RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e8993cba70e48439ee9df2ed14e5d08, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734032037655 2024-12-12T19:33:59,114 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#274 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:33:59,114 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/e080fad9a5e044d9b488a1913f656647 is 50, key is test_row_0/C:col10/1734032038324/Put/seqid=0 2024-12-12T19:33:59,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742153_1329 (size=12241) 2024-12-12T19:33:59,231 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/e080fad9a5e044d9b488a1913f656647 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/e080fad9a5e044d9b488a1913f656647 2024-12-12T19:33:59,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032099251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032099251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032099251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,291 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into e080fad9a5e044d9b488a1913f656647(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:33:59,291 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:59,292 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=12, startTime=1734032038742; duration=0sec 2024-12-12T19:33:59,292 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:59,292 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:33:59,333 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/c22b619d3d344313a61a9ae53c0eb237 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/c22b619d3d344313a61a9ae53c0eb237 2024-12-12T19:33:59,355 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into c22b619d3d344313a61a9ae53c0eb237(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
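The 512.0 K blocking limit quoted in these RegionTooBusyException entries is the per-region memstore blocking size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The actual settings used by this run are not visible in the log; the sketch below only shows one combination of illustrative values that would yield the same 512 KB limit.

// Hypothetical configuration sketch (values assumed, not read from the log).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitConfig {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block writes (RegionTooBusyException) at 4 x 128 KB = 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}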
2024-12-12T19:33:59,355 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:59,355 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=12, startTime=1734032038742; duration=0sec 2024-12-12T19:33:59,355 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:33:59,355 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:33:59,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8b878b5b73f346568ddffae276fb9566 2024-12-12T19:33:59,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/951d5129edeb4346977d6b119df95662 is 50, key is test_row_0/C:col10/1734032038331/Put/seqid=0 2024-12-12T19:33:59,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742154_1330 (size=12101) 2024-12-12T19:33:59,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/951d5129edeb4346977d6b119df95662 2024-12-12T19:33:59,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c8abd332e3314b2483b2243133b882d6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6 2024-12-12T19:33:59,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6, entries=200, sequenceid=134, filesize=38.8 K 2024-12-12T19:33:59,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8b878b5b73f346568ddffae276fb9566 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8b878b5b73f346568ddffae276fb9566 2024-12-12T19:33:59,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8b878b5b73f346568ddffae276fb9566, entries=150, sequenceid=134, filesize=11.8 K 2024-12-12T19:33:59,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/951d5129edeb4346977d6b119df95662 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/951d5129edeb4346977d6b119df95662 2024-12-12T19:33:59,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032099558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032099558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/951d5129edeb4346977d6b119df95662, entries=150, sequenceid=134, filesize=11.8 K 2024-12-12T19:33:59,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a329d898f2cbc923ae8747673ded3106 in 817ms, sequenceid=134, compaction requested=false 2024-12-12T19:33:59,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:33:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:59,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T19:33:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:33:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:33:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:33:59,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:33:59,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cd2770e73d7342a0a7c78d6b0b3464a2_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032038880/Put/seqid=0 2024-12-12T19:33:59,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742155_1331 (size=14794) 2024-12-12T19:33:59,680 DEBUG [MemStoreFlusher.0 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:33:59,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032099694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,722 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cd2770e73d7342a0a7c78d6b0b3464a2_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cd2770e73d7342a0a7c78d6b0b3464a2_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:33:59,725 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/2aca694e7f4d4625bffa2c7d91864019, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:33:59,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/2aca694e7f4d4625bffa2c7d91864019 is 175, key is test_row_0/A:col10/1734032038880/Put/seqid=0 2024-12-12T19:33:59,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742156_1332 (size=39749) 
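The DefaultMobStoreFlusher entries and the rename of the flushed file into mobdir/data indicate that column family A is MOB-enabled in this test, while B and C are flushed as ordinary store files. A hedged sketch of how such a family is typically declared with the HBase 2.x descriptor builders follows; the MOB threshold value is an assumption and is not stated anywhere in the log.

// Hypothetical sketch of declaring a MOB-enabled family (threshold assumed).
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  static void createMobTable(Admin admin) throws IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)    // large values are written to the mobdir seen in the log
        .setMobThreshold(100L)  // assumed threshold in bytes
        .build());
    admin.createTable(table.build());
  }
}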
2024-12-12T19:33:59,783 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/2aca694e7f4d4625bffa2c7d91864019 2024-12-12T19:33:59,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032099805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/f339efab72314582a4ecceed101deb03 is 50, key is test_row_0/B:col10/1734032038880/Put/seqid=0 2024-12-12T19:33:59,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032099826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,831 DEBUG [Thread-1362 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:59,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:33:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032099854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:33:59,860 DEBUG [Thread-1360 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:33:59,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742157_1333 (size=12151) 2024-12-12T19:33:59,878 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/f339efab72314582a4ecceed101deb03 2024-12-12T19:33:59,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T19:33:59,884 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-12T19:33:59,894 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:33:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-12T19:33:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T19:33:59,896 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:33:59,897 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:33:59,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:33:59,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3c2390f2ff8044c0a1fe329ce1e77732 is 50, key is test_row_0/C:col10/1734032038880/Put/seqid=0 2024-12-12T19:33:59,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742158_1334 (size=12151) 2024-12-12T19:33:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T19:34:00,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032100015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T19:34:00,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:00,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
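The FLUSH procedures above (procId 96 and 98/99) are driven by client-side Admin.flush calls against TestAcidGuarantees; the "Unable to complete flush" errors are the region server declining the remote FlushRegionCallable while an earlier flush is still running, which the master simply re-dispatches. A minimal client-side sketch, assuming a standard HBase 2.x client and nothing about the test tool's actual code (the class name is hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the log; this submits the FlushTableProcedure seen above
      // and waits until the master reports the procedure done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}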
2024-12-12T19:34:00,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032100064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:00,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032100070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T19:34:00,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T19:34:00,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:00,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
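The recurring RegionTooBusyException warnings ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit, i.e. the configured flush size times hbase.hregion.memstore.block.multiplier (default 4); the client's RpcRetryingCallerImpl, visible in the first stack trace, retries these calls until the flush frees space. The 512 K figure suggests the test runs with a deliberately tiny flush size of roughly 128 K, which is an assumption here, not something the log states. A small arithmetic sketch under that assumption (class name hypothetical):

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    long flushSize = 128 * 1024;  // hbase.hregion.memstore.flush.size, assumed ~128 K in this test
    int blockMultiplier = 4;      // hbase.hregion.memstore.block.multiplier, HBase default
    long blockingLimit = flushSize * blockMultiplier;
    // Matches the "Over memstore limit=512.0 K" rejections logged above.
    System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
  }
}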
2024-12-12T19:34:00,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:00,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032100328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3c2390f2ff8044c0a1fe329ce1e77732 2024-12-12T19:34:00,377 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T19:34:00,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
as already flushing 2024-12-12T19:34:00,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:00,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/2aca694e7f4d4625bffa2c7d91864019 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019 2024-12-12T19:34:00,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019, entries=200, sequenceid=160, filesize=38.8 K 2024-12-12T19:34:00,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/f339efab72314582a4ecceed101deb03 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f339efab72314582a4ecceed101deb03 2024-12-12T19:34:00,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f339efab72314582a4ecceed101deb03, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T19:34:00,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3c2390f2ff8044c0a1fe329ce1e77732 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3c2390f2ff8044c0a1fe329ce1e77732 2024-12-12T19:34:00,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3c2390f2ff8044c0a1fe329ce1e77732, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T19:34:00,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a329d898f2cbc923ae8747673ded3106 in 903ms, sequenceid=160, compaction requested=true 2024-12-12T19:34:00,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:00,482 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:00,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:00,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:00,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:00,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:00,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:00,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:00,483 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:00,491 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:00,492 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:34:00,492 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:00,492 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/3d85c06973104845b19c306aa44c21df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=108.0 K 2024-12-12T19:34:00,492 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,492 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/3d85c06973104845b19c306aa44c21df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019] 2024-12-12T19:34:00,495 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:00,495 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:00,496 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
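The selection messages above ("3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy picking all three files) line up with the stock minor-compaction settings: three files make a store eligible and sixteen store files block flushes. A sketch that reads the usual keys; the fallback values shown are the normal defaults rather than anything the log confirms explicitly (class name hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum files before a minor compaction is considered (default 3, matching "3 eligible").
    System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));
    // Maximum files merged in a single compaction pass (default 10).
    System.out.println(conf.getInt("hbase.hstore.compaction.max", 10));
    // Store file count at which flushes are blocked (default 16, matching "16 blocking").
    System.out.println(conf.getInt("hbase.hstore.blockingStoreFiles", 16));
  }
}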
2024-12-12T19:34:00,496 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/c22b619d3d344313a61a9ae53c0eb237, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8b878b5b73f346568ddffae276fb9566, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f339efab72314582a4ecceed101deb03] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=35.6 K 2024-12-12T19:34:00,496 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d85c06973104845b19c306aa44c21df, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734032037655 2024-12-12T19:34:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T19:34:00,503 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8abd332e3314b2483b2243133b882d6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734032038331 2024-12-12T19:34:00,503 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c22b619d3d344313a61a9ae53c0eb237, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734032037655 2024-12-12T19:34:00,511 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b878b5b73f346568ddffae276fb9566, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734032038331 2024-12-12T19:34:00,512 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2aca694e7f4d4625bffa2c7d91864019, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734032038880 2024-12-12T19:34:00,512 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f339efab72314582a4ecceed101deb03, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734032038880 2024-12-12T19:34:00,542 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:00,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:00,544 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:00,582 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#279 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:00,583 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8934fd4b0e8c4a0da0c330b421a68926 is 50, key is test_row_0/B:col10/1734032038880/Put/seqid=0 2024-12-12T19:34:00,610 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:00,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212664c9d06bcca4dceba8d77e7c1887099_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032039686/Put/seqid=0 2024-12-12T19:34:00,635 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212549f89cb485f4d8ab9d5692734d735e7_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:00,637 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212549f89cb485f4d8ab9d5692734d735e7_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:00,637 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212549f89cb485f4d8ab9d5692734d735e7_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:00,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742159_1335 (size=12493) 2024-12-12T19:34:00,675 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8934fd4b0e8c4a0da0c330b421a68926 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8934fd4b0e8c4a0da0c330b421a68926 2024-12-12T19:34:00,684 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into 8934fd4b0e8c4a0da0c330b421a68926(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
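The CompactingMemStore / CompactionPipeline lines in the pid=99 flush above show the test's stores flushing through an in-memory-compaction memstore rather than the default memstore. One way such a setup is enabled cluster-wide is the compacting-memstore type property; the "BASIC" value below is only an illustrative choice, since the log does not say which policy the test uses (class name hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactingMemStoreType {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Accepted values are NONE, BASIC, EAGER and ADAPTIVE; BASIC is assumed here for illustration.
    conf.set("hbase.hregion.compacting.memstore.type", "BASIC");
    System.out.println(conf.get("hbase.hregion.compacting.memstore.type"));
  }
}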
2024-12-12T19:34:00,684 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:00,684 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=13, startTime=1734032040482; duration=0sec 2024-12-12T19:34:00,684 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:00,684 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:00,684 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:00,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742160_1336 (size=12304) 2024-12-12T19:34:00,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:00,701 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:00,701 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:00,701 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:00,702 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/e080fad9a5e044d9b488a1913f656647, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/951d5129edeb4346977d6b119df95662, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3c2390f2ff8044c0a1fe329ce1e77732] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=35.6 K 2024-12-12T19:34:00,705 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e080fad9a5e044d9b488a1913f656647, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734032037655 2024-12-12T19:34:00,707 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 951d5129edeb4346977d6b119df95662, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734032038331 2024-12-12T19:34:00,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742161_1337 (size=4469) 2024-12-12T19:34:00,715 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c2390f2ff8044c0a1fe329ce1e77732, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734032038880 2024-12-12T19:34:00,727 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#280 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:00,728 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/63bad1887164458bb2ee3794c720ce0e is 175, key is test_row_0/A:col10/1734032038880/Put/seqid=0 2024-12-12T19:34:00,737 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212664c9d06bcca4dceba8d77e7c1887099_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212664c9d06bcca4dceba8d77e7c1887099_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c4eb17d1cec9447b8ee122254e43c5d2, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:00,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c4eb17d1cec9447b8ee122254e43c5d2 is 175, key is test_row_0/A:col10/1734032039686/Put/seqid=0 2024-12-12T19:34:00,758 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:00,758 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/8734c44a9a04445e991043b05ffef3dd is 50, key is test_row_0/C:col10/1734032038880/Put/seqid=0 2024-12-12T19:34:00,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742162_1338 (size=31447) 2024-12-12T19:34:00,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742163_1339 (size=31105) 2024-12-12T19:34:00,768 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c4eb17d1cec9447b8ee122254e43c5d2 2024-12-12T19:34:00,782 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/63bad1887164458bb2ee3794c720ce0e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/63bad1887164458bb2ee3794c720ce0e 2024-12-12T19:34:00,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/3c0c17a31f1f4533974237a56a18f1a4 is 50, key is test_row_0/B:col10/1734032039686/Put/seqid=0 2024-12-12T19:34:00,798 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 63bad1887164458bb2ee3794c720ce0e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
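Family A's flushes and compactions in this run go through DefaultMobStoreFlusher and DefaultMobStoreCompactor and write into the mobdir paths above, so the family is MOB-enabled. A hedged sketch of declaring such a family; the threshold value is assumed for illustration, the log only shows that MOB handling is active (class name hypothetical):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)           // cells above the threshold are stored as MOB files under mobdir
        .setMobThreshold(100 * 1024L)  // 100 K cutoff, assumed for illustration
        .build();
    System.out.println(cfA);
  }
}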
2024-12-12T19:34:00,798 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:00,798 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=13, startTime=1734032040482; duration=0sec 2024-12-12T19:34:00,798 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:00,798 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:00,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742164_1340 (size=12493) 2024-12-12T19:34:00,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742165_1341 (size=12151) 2024-12-12T19:34:00,850 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/3c0c17a31f1f4533974237a56a18f1a4 2024-12-12T19:34:00,859 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/8734c44a9a04445e991043b05ffef3dd as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/8734c44a9a04445e991043b05ffef3dd 2024-12-12T19:34:00,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:00,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:00,872 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T19:34:00,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/c2ba9604e2a847868cccba5fbbf2fd45 is 50, key is test_row_0/C:col10/1734032039686/Put/seqid=0 2024-12-12T19:34:00,879 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into 8734c44a9a04445e991043b05ffef3dd(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:00,880 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:00,880 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=13, startTime=1734032040483; duration=0sec 2024-12-12T19:34:00,880 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:00,880 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:00,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742166_1342 (size=12151) 2024-12-12T19:34:00,906 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/c2ba9604e2a847868cccba5fbbf2fd45 2024-12-12T19:34:00,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/c4eb17d1cec9447b8ee122254e43c5d2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2 2024-12-12T19:34:00,938 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2, entries=150, sequenceid=173, filesize=30.4 K 2024-12-12T19:34:00,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/3c0c17a31f1f4533974237a56a18f1a4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/3c0c17a31f1f4533974237a56a18f1a4 2024-12-12T19:34:00,944 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/3c0c17a31f1f4533974237a56a18f1a4, entries=150, sequenceid=173, filesize=11.9 K 2024-12-12T19:34:00,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/c2ba9604e2a847868cccba5fbbf2fd45 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/c2ba9604e2a847868cccba5fbbf2fd45 2024-12-12T19:34:00,949 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/c2ba9604e2a847868cccba5fbbf2fd45, entries=150, sequenceid=173, filesize=11.9 K 2024-12-12T19:34:00,959 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=60.38 KB/61830 for a329d898f2cbc923ae8747673ded3106 in 415ms, sequenceid=173, compaction requested=false 2024-12-12T19:34:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-12T19:34:00,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-12T19:34:00,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:00,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:34:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:00,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-12T19:34:00,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0670 sec 
2024-12-12T19:34:00,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 1.0760 sec 2024-12-12T19:34:00,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120f01154904504d6d9f1d66c13d60e928_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T19:34:01,004 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-12T19:34:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742167_1343 (size=12304) 2024-12-12T19:34:01,007 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:01,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-12T19:34:01,014 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:01,015 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:01,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:01,022 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120f01154904504d6d9f1d66c13d60e928_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120f01154904504d6d9f1d66c13d60e928_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:01,028 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d, 
store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:01,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d is 175, key is test_row_0/A:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:01,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742168_1344 (size=31105) 2024-12-12T19:34:01,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032101089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032101093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032101099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:01,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032101194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032101201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:01,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032101400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032101413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,462 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=189, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d 2024-12-12T19:34:01,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:01,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:01,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/298f7c8b875048988aa7616c717faeac is 50, key is test_row_0/B:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:01,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742169_1345 (size=12151) 2024-12-12T19:34:01,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:01,630 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032101712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:01,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032101727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:01,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,793 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:01,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/298f7c8b875048988aa7616c717faeac 2024-12-12T19:34:01,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/950d15dd1bf44f37964d72fb82eba781 is 50, key is test_row_0/C:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:01,945 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:01,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:01,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:01,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:01,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:01,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:01,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742170_1346 (size=12151) 2024-12-12T19:34:02,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/950d15dd1bf44f37964d72fb82eba781 2024-12-12T19:34:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d 2024-12-12T19:34:02,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d, entries=150, sequenceid=189, filesize=30.4 K 2024-12-12T19:34:02,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/298f7c8b875048988aa7616c717faeac as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/298f7c8b875048988aa7616c717faeac 2024-12-12T19:34:02,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/298f7c8b875048988aa7616c717faeac, entries=150, sequenceid=189, filesize=11.9 K 2024-12-12T19:34:02,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/950d15dd1bf44f37964d72fb82eba781 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/950d15dd1bf44f37964d72fb82eba781 2024-12-12T19:34:02,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/950d15dd1bf44f37964d72fb82eba781, entries=150, sequenceid=189, filesize=11.9 K 2024-12-12T19:34:02,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a329d898f2cbc923ae8747673ded3106 in 1115ms, sequenceid=189, compaction requested=true 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:02,083 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:02,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:02,083 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:02,084 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:02,084 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:02,084 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:02,084 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8934fd4b0e8c4a0da0c330b421a68926, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/3c0c17a31f1f4533974237a56a18f1a4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/298f7c8b875048988aa7616c717faeac] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=35.9 K 2024-12-12T19:34:02,085 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:02,085 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:34:02,085 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:02,085 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/63bad1887164458bb2ee3794c720ce0e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=91.5 K 2024-12-12T19:34:02,085 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:02,085 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/63bad1887164458bb2ee3794c720ce0e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d] 2024-12-12T19:34:02,085 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8934fd4b0e8c4a0da0c330b421a68926, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734032038880 2024-12-12T19:34:02,085 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63bad1887164458bb2ee3794c720ce0e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734032038880 2024-12-12T19:34:02,085 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c0c17a31f1f4533974237a56a18f1a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734032039648 2024-12-12T19:34:02,086 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4eb17d1cec9447b8ee122254e43c5d2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734032039648 2024-12-12T19:34:02,086 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 298f7c8b875048988aa7616c717faeac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734032040929 2024-12-12T19:34:02,087 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab92fcc26bcd44eea55f5e9eb6a0aa3d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734032040929 2024-12-12T19:34:02,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-12T19:34:02,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:02,101 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T19:34:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:02,104 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:02,112 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#289 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:02,113 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/5e7520040c1e43f7a325accd59a15379 is 50, key is test_row_0/B:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:02,116 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212c19da7e809ba46d2ad722dedf9b186d0_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:02,118 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212c19da7e809ba46d2ad722dedf9b186d0_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:02,118 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c19da7e809ba46d2ad722dedf9b186d0_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:02,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c4d4501cd95e4bd0a60aedd796451739_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032041063/Put/seqid=0 2024-12-12T19:34:02,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742171_1347 (size=12595) 2024-12-12T19:34:02,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742173_1349 (size=12304) 2024-12-12T19:34:02,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742172_1348 (size=4469) 2024-12-12T19:34:02,199 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#288 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:02,200 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/9f54219c6fb9432f8e0eb517c14af386 is 175, key is test_row_0/A:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:02,219 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/5e7520040c1e43f7a325accd59a15379 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5e7520040c1e43f7a325accd59a15379 2024-12-12T19:34:02,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742174_1350 (size=31549) 2024-12-12T19:34:02,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:02,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:02,263 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into 5e7520040c1e43f7a325accd59a15379(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:02,263 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:02,263 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=13, startTime=1734032042083; duration=0sec 2024-12-12T19:34:02,264 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:02,310 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:02,310 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:02,316 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/9f54219c6fb9432f8e0eb517c14af386 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9f54219c6fb9432f8e0eb517c14af386 2024-12-12T19:34:02,324 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:02,324 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:02,324 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:02,325 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/8734c44a9a04445e991043b05ffef3dd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/c2ba9604e2a847868cccba5fbbf2fd45, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/950d15dd1bf44f37964d72fb82eba781] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=35.9 K 2024-12-12T19:34:02,328 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8734c44a9a04445e991043b05ffef3dd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734032038880 2024-12-12T19:34:02,332 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c2ba9604e2a847868cccba5fbbf2fd45, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734032039648 2024-12-12T19:34:02,336 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 950d15dd1bf44f37964d72fb82eba781, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734032040929 2024-12-12T19:34:02,354 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 9f54219c6fb9432f8e0eb517c14af386(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:02,355 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:02,355 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=13, startTime=1734032042083; duration=0sec 2024-12-12T19:34:02,355 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:02,355 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:02,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:02,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032102356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:02,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032102368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,377 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:02,378 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/92cdaecaf4d04c6bb84dcc5631b94ba5 is 50, key is test_row_0/C:col10/1734032040945/Put/seqid=0 2024-12-12T19:34:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742175_1351 (size=12595) 2024-12-12T19:34:02,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:02,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032102481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:02,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032102481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:02,616 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c4d4501cd95e4bd0a60aedd796451739_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c4d4501cd95e4bd0a60aedd796451739_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:02,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/a591d9cbcd26462f87dfee7c692d8a79, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/a591d9cbcd26462f87dfee7c692d8a79 is 175, key is test_row_0/A:col10/1734032041063/Put/seqid=0 2024-12-12T19:34:02,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742176_1352 (size=31105) 2024-12-12T19:34:02,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:02,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032102692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:02,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032102698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:02,901 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/92cdaecaf4d04c6bb84dcc5631b94ba5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/92cdaecaf4d04c6bb84dcc5631b94ba5 2024-12-12T19:34:02,953 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into 92cdaecaf4d04c6bb84dcc5631b94ba5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:02,953 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:02,953 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=13, startTime=1734032042083; duration=0sec 2024-12-12T19:34:02,953 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:02,953 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:03,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032103009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032103009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,079 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/a591d9cbcd26462f87dfee7c692d8a79 2024-12-12T19:34:03,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/66b9b0c700c945c5aa90ef1fef3d2548 is 50, key is test_row_0/B:col10/1734032041063/Put/seqid=0 2024-12-12T19:34:03,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032103111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,120 DEBUG [Thread-1364 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4208 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:03,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742177_1353 (size=12151) 2024-12-12T19:34:03,138 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=212 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/66b9b0c700c945c5aa90ef1fef3d2548 2024-12-12T19:34:03,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/2cb9b1cf03094a52965dd01753da1763 is 50, key is test_row_0/C:col10/1734032041063/Put/seqid=0 2024-12-12T19:34:03,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742178_1354 (size=12151) 2024-12-12T19:34:03,203 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/2cb9b1cf03094a52965dd01753da1763 2024-12-12T19:34:03,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/a591d9cbcd26462f87dfee7c692d8a79 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79 2024-12-12T19:34:03,230 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79, entries=150, sequenceid=212, filesize=30.4 K 2024-12-12T19:34:03,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/66b9b0c700c945c5aa90ef1fef3d2548 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/66b9b0c700c945c5aa90ef1fef3d2548 2024-12-12T19:34:03,244 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/66b9b0c700c945c5aa90ef1fef3d2548, entries=150, sequenceid=212, filesize=11.9 K 2024-12-12T19:34:03,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/2cb9b1cf03094a52965dd01753da1763 as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/2cb9b1cf03094a52965dd01753da1763 2024-12-12T19:34:03,250 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/2cb9b1cf03094a52965dd01753da1763, entries=150, sequenceid=212, filesize=11.9 K 2024-12-12T19:34:03,254 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a329d898f2cbc923ae8747673ded3106 in 1153ms, sequenceid=212, compaction requested=false 2024-12-12T19:34:03,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:03,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:03,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-12T19:34:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-12T19:34:03,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-12T19:34:03,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2470 sec 2024-12-12T19:34:03,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 2.2550 sec 2024-12-12T19:34:03,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:03,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:34:03,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:03,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:03,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:03,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:03,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:03,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
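Note: the RegionTooBusyException entries before and after this point record server-side write backpressure. The region's memstore has crossed its blocking limit (reported here as 512.0 K; that limit is normally derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), so Mutate RPCs are rejected at HRegion.checkResources() and the client's RpcRetryingCallerImpl keeps retrying, which is what the "Call exception, tries=N, retries=16" lines show. The following is only a minimal client-side sketch of that behaviour against the standard HBase 2.x client API; the retry/pause values and the cell value are illustrative assumptions, while the table, row, family and qualifier names are taken from the log above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry budget; the log shows RpcRetryingCallerImpl using retries=16.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in ms between retries (assumed value)

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          // Hypothetical payload; family "A" and qualifier "col10" match the keys seen in the log.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Table.put() blocks and retries internally: a RegionTooBusyException returned by the
          // server (memstore above its blocking limit) is retried until the retry budget is spent.
          table.put(put);
        }
      }
    }

Because the retry loop lives inside the client, the log only shows periodic "Call exception, tries=N" DEBUG lines from the writer threads rather than failures, until either the memstore is flushed below the limit or the retry budget runs out.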
2024-12-12T19:34:03,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121224eaf5865f6742d9a46259081c210bd0_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:03,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742179_1355 (size=17284) 2024-12-12T19:34:03,579 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:03,594 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121224eaf5865f6742d9a46259081c210bd0_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121224eaf5865f6742d9a46259081c210bd0_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:03,595 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/912d89e63a3d46c9b18e9d7ba781d8d6, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:03,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/912d89e63a3d46c9b18e9d7ba781d8d6 is 175, key is test_row_0/A:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:03,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742180_1356 (size=48389) 2024-12-12T19:34:03,618 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=230, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/912d89e63a3d46c9b18e9d7ba781d8d6 2024-12-12T19:34:03,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032103619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032103621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d8b87aeb6537439e9df733680245fd7f is 50, key is test_row_0/B:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:03,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742181_1357 (size=12151) 2024-12-12T19:34:03,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032103732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032103735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41144 deadline: 1734032103856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,859 DEBUG [Thread-1362 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:03,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734032103871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,883 DEBUG [Thread-1360 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8250 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:03,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032103947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:03,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:03,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032103964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d8b87aeb6537439e9df733680245fd7f 2024-12-12T19:34:04,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70 is 50, key is test_row_0/C:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:04,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742182_1358 (size=12151) 2024-12-12T19:34:04,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032104262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032104267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70 2024-12-12T19:34:04,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/912d89e63a3d46c9b18e9d7ba781d8d6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6 2024-12-12T19:34:04,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6, entries=250, sequenceid=230, filesize=47.3 K 2024-12-12T19:34:04,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d8b87aeb6537439e9df733680245fd7f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d8b87aeb6537439e9df733680245fd7f 2024-12-12T19:34:04,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d8b87aeb6537439e9df733680245fd7f, entries=150, sequenceid=230, filesize=11.9 K 2024-12-12T19:34:04,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70 2024-12-12T19:34:04,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70, entries=150, sequenceid=230, filesize=11.9 K 2024-12-12T19:34:04,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a329d898f2cbc923ae8747673ded3106 in 1193ms, sequenceid=230, compaction requested=true 2024-12-12T19:34:04,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:04,719 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:04,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:04,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:04,719 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:04,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:04,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:04,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:04,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:04,739 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:04,739 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:04,739 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
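Note: the entries above show the flusher handing a third store file per family to the compaction queue, after which ExploringCompactionPolicy selects all three eligible files of stores A and B for a minor compaction on the region server's compaction threads. A compaction of the same store could also be requested explicitly through the Admin API; the sketch below is only illustrative, assumes the standard HBase 2.x client, and does not reproduce anything this test actually does. The configuration keys mentioned in the comment are server-side settings named here with their documented defaults, not values read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Queue a (minor) compaction of column family B of the test table. The
          // region server's ExploringCompactionPolicy still decides which files to
          // merge, governed by server-side knobs such as hbase.hstore.compaction.min
          // (default 3, which is why 3 eligible files were enough here) and
          // hbase.hstore.compaction.max (default 10).
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
        }
      }
    }

Admin.compact() only enqueues the request; file selection and the merge itself run on the region server, which is why the log shows the RS:0 shortCompactions/longCompactions threads doing the actual work.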
2024-12-12T19:34:04,739 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5e7520040c1e43f7a325accd59a15379, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/66b9b0c700c945c5aa90ef1fef3d2548, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d8b87aeb6537439e9df733680245fd7f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=36.0 K 2024-12-12T19:34:04,739 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111043 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:04,739 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:34:04,739 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:04,740 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9f54219c6fb9432f8e0eb517c14af386, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=108.4 K 2024-12-12T19:34:04,740 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:04,740 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9f54219c6fb9432f8e0eb517c14af386, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6] 2024-12-12T19:34:04,747 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e7520040c1e43f7a325accd59a15379, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734032040929 2024-12-12T19:34:04,748 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f54219c6fb9432f8e0eb517c14af386, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734032040929 2024-12-12T19:34:04,756 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a591d9cbcd26462f87dfee7c692d8a79, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734032041063 2024-12-12T19:34:04,759 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 66b9b0c700c945c5aa90ef1fef3d2548, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734032041063 2024-12-12T19:34:04,764 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 912d89e63a3d46c9b18e9d7ba781d8d6, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734032042263 2024-12-12T19:34:04,773 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d8b87aeb6537439e9df733680245fd7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734032042263 2024-12-12T19:34:04,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:04,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T19:34:04,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:04,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:04,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:04,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:04,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:04,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:04,808 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized 
enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:04,810 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:04,811 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/28ddea329f954379a5390b3df11db9fa is 50, key is test_row_0/B:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:04,824 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412122d8eb60aeaf54e798c741447a927f9f1_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:04,826 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412122d8eb60aeaf54e798c741447a927f9f1_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:04,827 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122d8eb60aeaf54e798c741447a927f9f1_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:04,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121254e5e5c671234422bb4bc9ee86984966_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032043590/Put/seqid=0 2024-12-12T19:34:04,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:04,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032104859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:04,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032104861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742183_1359 (size=12697) 2024-12-12T19:34:04,893 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/28ddea329f954379a5390b3df11db9fa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/28ddea329f954379a5390b3df11db9fa 2024-12-12T19:34:04,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742184_1360 (size=4469) 2024-12-12T19:34:04,903 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#297 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:04,904 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into 28ddea329f954379a5390b3df11db9fa(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
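[annotation] The repeated "RegionTooBusyException: Over memstore limit=512.0 K" entries are HRegion.checkResources refusing new mutations while this region's memstore is above its blocking size. In stock HBase that blocking size is the per-region flush size multiplied by the block multiplier; the sketch below only reads those two standard settings and prints the resulting threshold. The 128 KB flush size is an assumption chosen so the product matches the 512 K limit seen here, not a value taken from the test configuration itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/** Sketch: derive the memstore blocking threshold from standard HBase settings. */
public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-sized values; the shipped defaults are 128 MB and 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;   // 512 KB with the values above

        System.out.println("writes are rejected once the region memstore exceeds "
                + blockingLimit + " bytes");
    }
}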
2024-12-12T19:34:04,904 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:04,904 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/14e1ded481d54e8cbf5f42694a48e394 is 175, key is test_row_0/A:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:04,904 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=13, startTime=1734032044719; duration=0sec 2024-12-12T19:34:04,904 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:04,904 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:04,904 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:04,906 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:04,906 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:04,907 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
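[annotation] When the server answers a Mutate call with RegionTooBusyException, as in the ipc.CallRunner entries interleaved above and below, the stock HBase client normally retries on its own before surfacing an error. A minimal hand-rolled equivalent is sketched below purely for illustration; the table, row and column names mirror the ones in this log, while the connection settings, retry budget and backoff values are assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: retry a put with backoff while the region is pushing back on writers. */
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                        // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                               // write accepted
                } catch (IOException e) {
                    // Typically RegionTooBusyException (possibly wrapped by the client),
                    // i.e. the memstore is over its blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}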
2024-12-12T19:34:04,907 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/92cdaecaf4d04c6bb84dcc5631b94ba5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/2cb9b1cf03094a52965dd01753da1763, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=36.0 K 2024-12-12T19:34:04,907 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 92cdaecaf4d04c6bb84dcc5631b94ba5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1734032040929 2024-12-12T19:34:04,909 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cb9b1cf03094a52965dd01753da1763, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734032041063 2024-12-12T19:34:04,910 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 30cd9ef28d7f4dc9ab6cc90c57bf8b70, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734032042263 2024-12-12T19:34:04,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742185_1361 (size=14794) 2024-12-12T19:34:04,928 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:04,948 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:04,948 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/a79481e105be4ea7a6562afa90c590b7 is 50, key is test_row_0/C:col10/1734032043525/Put/seqid=0 2024-12-12T19:34:04,960 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121254e5e5c671234422bb4bc9ee86984966_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121254e5e5c671234422bb4bc9ee86984966_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:04,961 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/22445e28227341589b9f1e481af057eb, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:04,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/22445e28227341589b9f1e481af057eb is 175, key is test_row_0/A:col10/1734032043590/Put/seqid=0 2024-12-12T19:34:04,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:04,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032104962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:04,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032104975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742187_1363 (size=12697) 2024-12-12T19:34:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742186_1362 (size=31651) 2024-12-12T19:34:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742188_1364 (size=39749) 2024-12-12T19:34:05,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T19:34:05,131 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-12T19:34:05,135 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-12T19:34:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T19:34:05,147 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:05,151 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:05,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:05,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:05,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032105177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:05,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032105190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T19:34:05,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T19:34:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:05,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
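[annotation] The pid=102/103 entries show a client-requested table flush ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") being fanned out by the master as a FlushTableProcedure with a per-region FlushRegionProcedure, and the region server rejecting the callable with "Unable to complete flush ... as already flushing" because a memstore flush is still in progress; the master keeps redispatching the procedure until it succeeds, which it does a few entries further down. The client side of such a request is a single Admin call; a minimal sketch, with connection details assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Sketch: ask the master to flush every region of a table, as the test driver does here. */
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // In the log this request shows up as the FlushTableProcedure (pid=102)
            // and its per-region FlushRegionProcedure (pid=103).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}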
2024-12-12T19:34:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:05,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:05,417 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/22445e28227341589b9f1e481af057eb 2024-12-12T19:34:05,427 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/a79481e105be4ea7a6562afa90c590b7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/a79481e105be4ea7a6562afa90c590b7 2024-12-12T19:34:05,442 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/14e1ded481d54e8cbf5f42694a48e394 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/14e1ded481d54e8cbf5f42694a48e394 2024-12-12T19:34:05,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T19:34:05,447 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into a79481e105be4ea7a6562afa90c590b7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
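[annotation] The DefaultMobStoreFlusher / DefaultMobStoreCompactor and HMobStore entries indicate that column family A is MOB-enabled in this run: flushes and compactions of A go through the MOB writer path, placing any MOB part under mobdir (the compaction above aborts its MOB writer because the selected files contain no MOB cells). Declaring such a family at table-creation time looks roughly like the sketch below; the MOB threshold value is an assumption, not the one used by this test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: a TestAcidGuarantees-style table with a MOB-enabled 'A' family. */
public class MobFamilySketch {
    public static void main(String[] args) {
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)           // route large cells through the MOB path
                .setMobThreshold(100 * 1024L)  // assumed: cells above 100 KB are stored as MOB cells
                .build();

        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                .build();

        // Create it against a live cluster with Admin#createTable(table).
        System.out.println(table);
    }
}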
2024-12-12T19:34:05,447 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:05,447 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=13, startTime=1734032044719; duration=0sec 2024-12-12T19:34:05,447 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:05,447 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:05,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/75c70efba2a242628dd704f28a55e11f is 50, key is test_row_0/B:col10/1734032043590/Put/seqid=0 2024-12-12T19:34:05,452 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 14e1ded481d54e8cbf5f42694a48e394(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:05,452 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:05,452 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=13, startTime=1734032044719; duration=0sec 2024-12-12T19:34:05,452 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:05,452 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:05,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T19:34:05,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:05,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
as already flushing 2024-12-12T19:34:05,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:05,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:05,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:05,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:05,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742189_1365 (size=12151) 2024-12-12T19:34:05,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/75c70efba2a242628dd704f28a55e11f 2024-12-12T19:34:05,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032105487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:05,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032105503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3d15302c329e4798b1b8f827f9d5614a is 50, key is test_row_0/C:col10/1734032043590/Put/seqid=0 2024-12-12T19:34:05,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742190_1366 (size=12151) 2024-12-12T19:34:05,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3d15302c329e4798b1b8f827f9d5614a 2024-12-12T19:34:05,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/22445e28227341589b9f1e481af057eb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb 2024-12-12T19:34:05,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb, entries=200, sequenceid=252, filesize=38.8 K 2024-12-12T19:34:05,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/75c70efba2a242628dd704f28a55e11f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/75c70efba2a242628dd704f28a55e11f 2024-12-12T19:34:05,615 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T19:34:05,619 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:05,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:05,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:05,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:05,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:05,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:05,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/75c70efba2a242628dd704f28a55e11f, entries=150, sequenceid=252, filesize=11.9 K 2024-12-12T19:34:05,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3d15302c329e4798b1b8f827f9d5614a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3d15302c329e4798b1b8f827f9d5614a 2024-12-12T19:34:05,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3d15302c329e4798b1b8f827f9d5614a, entries=150, sequenceid=252, filesize=11.9 K 2024-12-12T19:34:05,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a329d898f2cbc923ae8747673ded3106 in 869ms, sequenceid=252, compaction requested=false 2024-12-12T19:34:05,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T19:34:05,774 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:05,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T19:34:05,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
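[annotation] Every flush and compaction above reports the same entry count and the same row/column pattern (test_row_0 ... col10) for families A, B and C, which is the write shape an AcidGuarantees-style writer produces: each mutation is one Put touching all three families of a single row, so readers must observe the families change together. The actual test writer is not part of this log; the sketch below only illustrates that assumed shape, with the value payload invented.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: one atomic Put that updates families A, B and C of the same row. */
public class MultiFamilyPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] row = Bytes.toBytes("test_row_0");
        byte[] qualifier = Bytes.toBytes("col10");
        byte[] value = Bytes.toBytes("assumed-value");   // payload is an assumption

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(row);
            put.addColumn(Bytes.toBytes("A"), qualifier, value);
            put.addColumn(Bytes.toBytes("B"), qualifier, value);
            put.addColumn(Bytes.toBytes("C"), qualifier, value);
            // A single-row Put is applied atomically, so the three families move together.
            table.put(put);
        }
    }
}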
2024-12-12T19:34:05,777 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:34:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:05,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a38788adb5214ff98f6b106499ad5fb8_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032044859/Put/seqid=0 2024-12-12T19:34:05,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742191_1367 (size=12454) 2024-12-12T19:34:05,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:05,891 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a38788adb5214ff98f6b106499ad5fb8_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a38788adb5214ff98f6b106499ad5fb8_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:05,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/61986d3b7ef34a94bc956d82e8375f62, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:05,894 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/61986d3b7ef34a94bc956d82e8375f62 is 175, key is test_row_0/A:col10/1734032044859/Put/seqid=0 2024-12-12T19:34:05,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742192_1368 (size=31255) 2024-12-12T19:34:06,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:06,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:06,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032106173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032106175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T19:34:06,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032106278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032106291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,313 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=269, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/61986d3b7ef34a94bc956d82e8375f62 2024-12-12T19:34:06,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4d5879440c094209aa3960c93e77d22d is 50, key is test_row_0/B:col10/1734032044859/Put/seqid=0 2024-12-12T19:34:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742193_1369 (size=12301) 2024-12-12T19:34:06,377 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4d5879440c094209aa3960c93e77d22d 2024-12-12T19:34:06,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3e58d87bdf564a4b8039e1d8e2ae0354 is 50, key is test_row_0/C:col10/1734032044859/Put/seqid=0 2024-12-12T19:34:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742194_1370 (size=12301) 2024-12-12T19:34:06,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032106483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032106503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032106790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:06,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032106815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:06,823 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3e58d87bdf564a4b8039e1d8e2ae0354 2024-12-12T19:34:06,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/61986d3b7ef34a94bc956d82e8375f62 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62 2024-12-12T19:34:06,840 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62, entries=150, sequenceid=269, filesize=30.5 K 2024-12-12T19:34:06,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4d5879440c094209aa3960c93e77d22d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4d5879440c094209aa3960c93e77d22d 2024-12-12T19:34:06,846 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4d5879440c094209aa3960c93e77d22d, entries=150, sequenceid=269, filesize=12.0 K 2024-12-12T19:34:06,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/3e58d87bdf564a4b8039e1d8e2ae0354 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3e58d87bdf564a4b8039e1d8e2ae0354 2024-12-12T19:34:06,854 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3e58d87bdf564a4b8039e1d8e2ae0354, entries=150, sequenceid=269, filesize=12.0 K 2024-12-12T19:34:06,856 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for a329d898f2cbc923ae8747673ded3106 in 1079ms, sequenceid=269, compaction requested=true 2024-12-12T19:34:06,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:06,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:06,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-12T19:34:06,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-12T19:34:06,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-12T19:34:06,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7090 sec 2024-12-12T19:34:06,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.7280 sec 2024-12-12T19:34:07,082 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/70cfedab7dac4a74afa7c4df5598ed90, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/3d85c06973104845b19c306aa44c21df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/63bad1887164458bb2ee3794c720ce0e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9f54219c6fb9432f8e0eb517c14af386, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6] to archive 2024-12-12T19:34:07,089 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
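The record above shows the compacted-files discharger moving family A's superseded store files out of the region's data directory into the archive tree; the same happens for families B and C below, and the master's HFile cleaner deletes archived files later once nothing references them. A minimal, hypothetical way to inspect that archive location with the plain Hadoop FileSystem API, reusing the path printed in the records above (this listing is not part of the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ListArchivedStoreFiles {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Archive directory for family A of this region, as seen in the log above.
            Path archive = new Path("hdfs://localhost:38311/user/jenkins/test-data/"
                + "482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/"
                + "TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A");
            FileSystem fs = FileSystem.get(archive.toUri(), conf);
            for (FileStatus status : fs.listStatus(archive)) {
                System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
            }
        }
    }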
2024-12-12T19:34:07,100 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/5f1bff8f59584e8fb8ae0ca205ddf40a 2024-12-12T19:34:07,100 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/3d85c06973104845b19c306aa44c21df to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/3d85c06973104845b19c306aa44c21df 2024-12-12T19:34:07,100 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9b16dc5bea1b4ae38e0d1ab295fce839 2024-12-12T19:34:07,101 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/7cb136ea601d4cfd8f6dd032a11096a9 2024-12-12T19:34:07,101 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/685f79261aeb4ee5af42be2897faf325 2024-12-12T19:34:07,101 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c8abd332e3314b2483b2243133b882d6 2024-12-12T19:34:07,102 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/63bad1887164458bb2ee3794c720ce0e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/63bad1887164458bb2ee3794c720ce0e 2024-12-12T19:34:07,103 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/2aca694e7f4d4625bffa2c7d91864019 2024-12-12T19:34:07,103 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0280754184d94d21bba84e675e1b1c6f 2024-12-12T19:34:07,104 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/a591d9cbcd26462f87dfee7c692d8a79 2024-12-12T19:34:07,104 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9f54219c6fb9432f8e0eb517c14af386 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/9f54219c6fb9432f8e0eb517c14af386 2024-12-12T19:34:07,104 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/ab92fcc26bcd44eea55f5e9eb6a0aa3d 2024-12-12T19:34:07,111 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0701f7a1f360448f9a26b7efe485718e 2024-12-12T19:34:07,111 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/70cfedab7dac4a74afa7c4df5598ed90 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/70cfedab7dac4a74afa7c4df5598ed90 2024-12-12T19:34:07,113 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/912d89e63a3d46c9b18e9d7ba781d8d6 2024-12-12T19:34:07,116 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/c4eb17d1cec9447b8ee122254e43c5d2 2024-12-12T19:34:07,122 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/9c77965a306b4986a777b61157371cfe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/54a01e7ea1bd42ed8f86e260afe19ee4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/6b5c96579e9f4b92b14ac86fecc733dc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/eeb97d9270d5490685fafdb2d7e92750, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/bc203251dad94e84baeb1550d03a085e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5af54c10ffdb4b26b0d827b86e60df18, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/c22b619d3d344313a61a9ae53c0eb237, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4bf2d338be4b490b911db784a43964a0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8b878b5b73f346568ddffae276fb9566, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8934fd4b0e8c4a0da0c330b421a68926, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f339efab72314582a4ecceed101deb03, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/3c0c17a31f1f4533974237a56a18f1a4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5e7520040c1e43f7a325accd59a15379, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/298f7c8b875048988aa7616c717faeac, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/66b9b0c700c945c5aa90ef1fef3d2548, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d8b87aeb6537439e9df733680245fd7f] to archive 2024-12-12T19:34:07,127 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:34:07,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:07,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:34:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:07,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/eeb97d9270d5490685fafdb2d7e92750 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/eeb97d9270d5490685fafdb2d7e92750 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/bc203251dad94e84baeb1550d03a085e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/bc203251dad94e84baeb1550d03a085e 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/9c77965a306b4986a777b61157371cfe to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/9c77965a306b4986a777b61157371cfe 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from 
FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/54a01e7ea1bd42ed8f86e260afe19ee4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/54a01e7ea1bd42ed8f86e260afe19ee4 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5af54c10ffdb4b26b0d827b86e60df18 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5af54c10ffdb4b26b0d827b86e60df18 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/c22b619d3d344313a61a9ae53c0eb237 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/c22b619d3d344313a61a9ae53c0eb237 2024-12-12T19:34:07,148 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/6b5c96579e9f4b92b14ac86fecc733dc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/6b5c96579e9f4b92b14ac86fecc733dc 2024-12-12T19:34:07,151 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4bf2d338be4b490b911db784a43964a0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4bf2d338be4b490b911db784a43964a0 2024-12-12T19:34:07,156 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8b878b5b73f346568ddffae276fb9566 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8b878b5b73f346568ddffae276fb9566 2024-12-12T19:34:07,156 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f339efab72314582a4ecceed101deb03 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f339efab72314582a4ecceed101deb03 2024-12-12T19:34:07,156 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5e7520040c1e43f7a325accd59a15379 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/5e7520040c1e43f7a325accd59a15379 2024-12-12T19:34:07,156 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/66b9b0c700c945c5aa90ef1fef3d2548 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/66b9b0c700c945c5aa90ef1fef3d2548 2024-12-12T19:34:07,156 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/3c0c17a31f1f4533974237a56a18f1a4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/3c0c17a31f1f4533974237a56a18f1a4 2024-12-12T19:34:07,157 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/298f7c8b875048988aa7616c717faeac to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/298f7c8b875048988aa7616c717faeac 2024-12-12T19:34:07,157 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d8b87aeb6537439e9df733680245fd7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d8b87aeb6537439e9df733680245fd7f 2024-12-12T19:34:07,163 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8934fd4b0e8c4a0da0c330b421a68926 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8934fd4b0e8c4a0da0c330b421a68926 2024-12-12T19:34:07,170 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/82f419259bf24f4597c54a058ae55087, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/685cb7ccb6ed41ed945a90daa81f748f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/907624b7a69d40f59d066f25f7090270, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fa6535574f1e4fc3babf9237c596084f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/acc46ddabae94da7baa7f4de17a38794, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3de39448bda949ecbe5499786653a5e6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/e080fad9a5e044d9b488a1913f656647, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/1e8993cba70e48439ee9df2ed14e5d08, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/951d5129edeb4346977d6b119df95662, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/8734c44a9a04445e991043b05ffef3dd, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3c2390f2ff8044c0a1fe329ce1e77732, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/c2ba9604e2a847868cccba5fbbf2fd45, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/92cdaecaf4d04c6bb84dcc5631b94ba5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/950d15dd1bf44f37964d72fb82eba781, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/2cb9b1cf03094a52965dd01753da1763, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70] to archive 2024-12-12T19:34:07,183 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4c9c438b6eeb:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
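The RegionTooBusyException warnings at 19:34:06 above, and the one that resumes at 19:34:07,266 below, come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K here; in a stock configuration that limit is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier). Writers are expected to back off and retry until the in-flight flush frees memstore space; the standard HBase client retries this internally, so the following loop is only a hypothetical sketch of the idea, assuming the exception reaches the caller directly rather than wrapped by the client's own retry machinery:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BackoffPutSketch {
        // Hypothetical helper: retry a put a few times when the region reports that
        // its memstore is over the blocking limit.
        static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
            int attempt = 0;
            while (true) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (++attempt >= 5) {
                        throw e;                    // give up after a handful of attempts
                    }
                    Thread.sleep(200L * attempt);   // simple linear backoff before retrying
                }
            }
        }
    }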
2024-12-12T19:34:07,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ef478ca5ade14c638608656110753e17_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:07,200 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/685cb7ccb6ed41ed945a90daa81f748f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/685cb7ccb6ed41ed945a90daa81f748f 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/acc46ddabae94da7baa7f4de17a38794 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/acc46ddabae94da7baa7f4de17a38794 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/907624b7a69d40f59d066f25f7090270 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/907624b7a69d40f59d066f25f7090270 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fa6535574f1e4fc3babf9237c596084f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fa6535574f1e4fc3babf9237c596084f 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/1e8993cba70e48439ee9df2ed14e5d08 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/1e8993cba70e48439ee9df2ed14e5d08 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/82f419259bf24f4597c54a058ae55087 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/82f419259bf24f4597c54a058ae55087 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3de39448bda949ecbe5499786653a5e6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3de39448bda949ecbe5499786653a5e6 2024-12-12T19:34:07,201 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/e080fad9a5e044d9b488a1913f656647 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/e080fad9a5e044d9b488a1913f656647 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/92cdaecaf4d04c6bb84dcc5631b94ba5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/92cdaecaf4d04c6bb84dcc5631b94ba5 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/c2ba9604e2a847868cccba5fbbf2fd45 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/c2ba9604e2a847868cccba5fbbf2fd45 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/8734c44a9a04445e991043b05ffef3dd to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/8734c44a9a04445e991043b05ffef3dd 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3c2390f2ff8044c0a1fe329ce1e77732 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3c2390f2ff8044c0a1fe329ce1e77732 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/950d15dd1bf44f37964d72fb82eba781 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/950d15dd1bf44f37964d72fb82eba781 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/951d5129edeb4346977d6b119df95662 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/951d5129edeb4346977d6b119df95662 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/2cb9b1cf03094a52965dd01753da1763 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/2cb9b1cf03094a52965dd01753da1763 2024-12-12T19:34:07,210 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cd9ef28d7f4dc9ab6cc90c57bf8b70 2024-12-12T19:34:07,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742195_1371 (size=12454) 2024-12-12T19:34:07,224 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:07,263 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ef478ca5ade14c638608656110753e17_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ef478ca5ade14c638608656110753e17_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:07,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:07,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032107255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T19:34:07,266 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-12T19:34:07,279 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:07,279 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/e297cb5c6fe441c4bcb828a3db8a8825, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:07,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/e297cb5c6fe441c4bcb828a3db8a8825 is 175, key is test_row_0/A:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:07,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-12T19:34:07,281 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:07,281 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:07,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:07,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:07,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:07,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032107327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:07,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032107327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742196_1372 (size=31255) 2024-12-12T19:34:07,348 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/e297cb5c6fe441c4bcb828a3db8a8825 2024-12-12T19:34:07,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/758da933b6a74b80b9ce725bd1d2ecf0 is 50, key is test_row_0/B:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:07,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:07,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032107372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742197_1373 (size=12301) 2024-12-12T19:34:07,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:07,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/758da933b6a74b80b9ce725bd1d2ecf0 2024-12-12T19:34:07,434 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/06228e77284247d4aac9893cf8b99f9b is 50, key is test_row_0/C:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:07,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:07,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:07,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:07,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742198_1374 (size=12301) 2024-12-12T19:34:07,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/06228e77284247d4aac9893cf8b99f9b 2024-12-12T19:34:07,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/e297cb5c6fe441c4bcb828a3db8a8825 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825 2024-12-12T19:34:07,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825, entries=150, sequenceid=293, filesize=30.5 K 2024-12-12T19:34:07,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/758da933b6a74b80b9ce725bd1d2ecf0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/758da933b6a74b80b9ce725bd1d2ecf0 2024-12-12T19:34:07,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/758da933b6a74b80b9ce725bd1d2ecf0, entries=150, sequenceid=293, filesize=12.0 K 2024-12-12T19:34:07,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/06228e77284247d4aac9893cf8b99f9b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/06228e77284247d4aac9893cf8b99f9b 2024-12-12T19:34:07,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/06228e77284247d4aac9893cf8b99f9b, entries=150, sequenceid=293, filesize=12.0 K 2024-12-12T19:34:07,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a329d898f2cbc923ae8747673ded3106 in 410ms, sequenceid=293, compaction requested=true 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:07,544 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:07,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:07,547 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:07,555 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133910 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:07,555 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor 
compaction (all files) 2024-12-12T19:34:07,555 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,555 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/14e1ded481d54e8cbf5f42694a48e394, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=130.8 K 2024-12-12T19:34:07,555 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,555 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/14e1ded481d54e8cbf5f42694a48e394, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825] 2024-12-12T19:34:07,557 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 14e1ded481d54e8cbf5f42694a48e394, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734032042263 2024-12-12T19:34:07,558 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:07,558 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:07,558 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,558 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/28ddea329f954379a5390b3df11db9fa, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/75c70efba2a242628dd704f28a55e11f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4d5879440c094209aa3960c93e77d22d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/758da933b6a74b80b9ce725bd1d2ecf0] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=48.3 K 2024-12-12T19:34:07,558 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 22445e28227341589b9f1e481af057eb, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734032043590 2024-12-12T19:34:07,559 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28ddea329f954379a5390b3df11db9fa, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734032042263 2024-12-12T19:34:07,559 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 61986d3b7ef34a94bc956d82e8375f62, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1734032044795 2024-12-12T19:34:07,559 
DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75c70efba2a242628dd704f28a55e11f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734032043590 2024-12-12T19:34:07,559 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e297cb5c6fe441c4bcb828a3db8a8825, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1734032046166 2024-12-12T19:34:07,559 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d5879440c094209aa3960c93e77d22d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1734032044795 2024-12-12T19:34:07,559 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 758da933b6a74b80b9ce725bd1d2ecf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1734032046166 2024-12-12T19:34:07,567 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#309 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:07,568 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/f720e0c72c8f49d9bfa451da05d1fb2c is 50, key is test_row_0/B:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:07,575 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:07,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:07,589 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412120f91ee33fb044fa3a350cff46305c5ce_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:07,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:34:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:07,591 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412120f91ee33fb044fa3a350cff46305c5ce_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:07,591 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:07,591 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120f91ee33fb044fa3a350cff46305c5ce_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:07,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:07,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:07,601 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:07,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:07,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:07,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742199_1375 (size=12439) 2024-12-12T19:34:07,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742200_1376 (size=4469) 2024-12-12T19:34:07,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121270ffc173ecbb431a81e3990c12711933_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032047589/Put/seqid=0 2024-12-12T19:34:07,668 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/f720e0c72c8f49d9bfa451da05d1fb2c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f720e0c72c8f49d9bfa451da05d1fb2c 2024-12-12T19:34:07,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742201_1377 (size=14994) 2024-12-12T19:34:07,693 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:07,694 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into f720e0c72c8f49d9bfa451da05d1fb2c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:07,694 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:07,694 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=12, startTime=1734032047544; duration=0sec 2024-12-12T19:34:07,694 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:07,695 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:07,695 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:07,707 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:07,707 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:07,707 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,707 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/a79481e105be4ea7a6562afa90c590b7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3d15302c329e4798b1b8f827f9d5614a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3e58d87bdf564a4b8039e1d8e2ae0354, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/06228e77284247d4aac9893cf8b99f9b] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=48.3 K 2024-12-12T19:34:07,720 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a79481e105be4ea7a6562afa90c590b7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734032042263 2024-12-12T19:34:07,724 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d15302c329e4798b1b8f827f9d5614a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734032043590 2024-12-12T19:34:07,724 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e58d87bdf564a4b8039e1d8e2ae0354, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1734032044795 2024-12-12T19:34:07,728 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06228e77284247d4aac9893cf8b99f9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1734032046166 2024-12-12T19:34:07,731 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121270ffc173ecbb431a81e3990c12711933_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121270ffc173ecbb431a81e3990c12711933_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:07,734 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/36549a9a991b46b794808b5bade71196, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:07,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/36549a9a991b46b794808b5bade71196 is 175, key is test_row_0/A:col10/1734032047589/Put/seqid=0 2024-12-12T19:34:07,765 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:07,765 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:07,765 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/d2e030fb2ae348cc9acd7590d7a645c9 is 50, key is test_row_0/C:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:07,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:07,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:07,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742202_1378 (size=39949) 2024-12-12T19:34:07,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742203_1379 (size=12439) 2024-12-12T19:34:07,816 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=306, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/36549a9a991b46b794808b5bade71196 2024-12-12T19:34:07,834 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/d2e030fb2ae348cc9acd7590d7a645c9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/d2e030fb2ae348cc9acd7590d7a645c9 2024-12-12T19:34:07,839 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into d2e030fb2ae348cc9acd7590d7a645c9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:07,839 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:07,839 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=12, startTime=1734032047544; duration=0sec 2024-12-12T19:34:07,839 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:07,839 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:07,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/063db593adcb4a55a1c226f7c70e9517 is 50, key is test_row_0/B:col10/1734032047589/Put/seqid=0 2024-12-12T19:34:07,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:07,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032107845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742204_1380 (size=12301) 2024-12-12T19:34:07,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/063db593adcb4a55a1c226f7c70e9517 2024-12-12T19:34:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:07,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/9f0bab910a1e49b791657146e8e27b15 is 50, key is test_row_0/C:col10/1734032047589/Put/seqid=0 2024-12-12T19:34:07,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:07,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:07,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:07,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:07,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:07,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742205_1381 (size=12301) 2024-12-12T19:34:07,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032107957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,059 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#310 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:08,060 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0208d4a8fe1141fcb0e3ac02af426395 is 175, key is test_row_0/A:col10/1734032046166/Put/seqid=0 2024-12-12T19:34:08,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:08,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:08,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742206_1382 (size=31393) 2024-12-12T19:34:08,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032108175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,241 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:08,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:08,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:08,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032108338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:08,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032108343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/9f0bab910a1e49b791657146e8e27b15 2024-12-12T19:34:08,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/36549a9a991b46b794808b5bade71196 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196 2024-12-12T19:34:08,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196, entries=200, sequenceid=306, filesize=39.0 K 2024-12-12T19:34:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/063db593adcb4a55a1c226f7c70e9517 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/063db593adcb4a55a1c226f7c70e9517 2024-12-12T19:34:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:08,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:08,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:08,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:08,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/063db593adcb4a55a1c226f7c70e9517, entries=150, sequenceid=306, filesize=12.0 K 2024-12-12T19:34:08,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:08,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/9f0bab910a1e49b791657146e8e27b15 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/9f0bab910a1e49b791657146e8e27b15 2024-12-12T19:34:08,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/9f0bab910a1e49b791657146e8e27b15, entries=150, sequenceid=306, filesize=12.0 K 2024-12-12T19:34:08,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a329d898f2cbc923ae8747673ded3106 in 822ms, sequenceid=306, compaction requested=false 2024-12-12T19:34:08,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:08,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:08,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:34:08,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:08,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:08,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:08,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:08,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:08,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:08,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121202779f143fd540c8970736e22c7f0bfc_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:08,550 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0208d4a8fe1141fcb0e3ac02af426395 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0208d4a8fe1141fcb0e3ac02af426395 2024-12-12T19:34:08,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,551 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:08,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:08,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:08,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,555 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 0208d4a8fe1141fcb0e3ac02af426395(size=30.7 K), total size for store is 69.7 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-12-12T19:34:08,555 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:08,555 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=12, startTime=1734032047544; duration=1sec 2024-12-12T19:34:08,555 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:08,555 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:08,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742207_1383 (size=12454) 2024-12-12T19:34:08,580 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:08,593 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121202779f143fd540c8970736e22c7f0bfc_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121202779f143fd540c8970736e22c7f0bfc_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:08,594 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1c48ad20e01248aca68b5510be0926b2, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:08,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1c48ad20e01248aca68b5510be0926b2 is 175, key is test_row_0/A:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:08,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:08,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032108601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742208_1384 (size=31255) 2024-12-12T19:34:08,703 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:08,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:08,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:08,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:08,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032108710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:08,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
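Editor's note: the cycle above, in which the master repeatedly dispatches FlushRegionCallable for pid=105 and the region server answers "NOT flushing ... as already flushing", is the flush procedure waiting out the MemStoreFlusher flush already in progress; each attempt is reported back as an IOException and the procedure is retried. A minimal client-side sketch follows, assuming the flush originates from an Admin.flush call against the test table (the table name is taken from the log, everything else is illustrative).

```java
// Minimal sketch: requesting a table flush through the Admin API. The master
// turns this into a flush procedure like pid=105 above and keeps retrying it
// until any concurrent flush on the region has finished; the caller sees one call.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the flush procedure completes (or the RPC/procedure times out).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```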
2024-12-12T19:34:08,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:08,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:08,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032108920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:09,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,056 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1c48ad20e01248aca68b5510be0926b2 2024-12-12T19:34:09,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/a246c2aed94c42edb8a63d5370195777 is 50, key is test_row_0/B:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:09,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742209_1385 (size=12301) 2024-12-12T19:34:09,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:09,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
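Editor's note: the mobdir rename and DefaultMobStoreFlusher lines above indicate that family A of this table is MOB-enabled, so oversized cells are flushed into separate MOB files while the regular store file keeps only references. A hedged sketch of how such a family is typically declared is below; the threshold value is arbitrary and the test's actual table descriptor is not shown in this log.

```java
// Illustrative only: declaring a MOB-enabled column family similar to family A
// above. The table and family names mirror the log; the threshold and the rest
// of the descriptor are assumptions.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)     // cells above the threshold go to MOB files under mobdir
        .setMobThreshold(100L)   // bytes; illustrative value
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();
  }
}
```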
2024-12-12T19:34:09,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:09,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032109228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:09,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:09,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:09,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:09,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/a246c2aed94c42edb8a63d5370195777 2024-12-12T19:34:09,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/987b460804914cbb9ee2f7a0061f5b1e is 50, key is test_row_0/C:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:09,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742210_1386 (size=12301) 2024-12-12T19:34:09,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
as already flushing 2024-12-12T19:34:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,658 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:09,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032109751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:09,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:09,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:09,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:09,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:09,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:09,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:10,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/987b460804914cbb9ee2f7a0061f5b1e 2024-12-12T19:34:10,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1c48ad20e01248aca68b5510be0926b2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2 2024-12-12T19:34:10,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2, entries=150, sequenceid=332, filesize=30.5 K 2024-12-12T19:34:10,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/a246c2aed94c42edb8a63d5370195777 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/a246c2aed94c42edb8a63d5370195777 2024-12-12T19:34:10,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/a246c2aed94c42edb8a63d5370195777, entries=150, sequenceid=332, filesize=12.0 K 2024-12-12T19:34:10,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/987b460804914cbb9ee2f7a0061f5b1e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/987b460804914cbb9ee2f7a0061f5b1e 2024-12-12T19:34:10,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/987b460804914cbb9ee2f7a0061f5b1e, entries=150, sequenceid=332, filesize=12.0 K 2024-12-12T19:34:10,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a329d898f2cbc923ae8747673ded3106 in 1625ms, sequenceid=332, compaction requested=true 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:10,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:34:10,131 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:10,131 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:10,135 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:10,135 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:10,135 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:10,135 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/d2e030fb2ae348cc9acd7590d7a645c9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/9f0bab910a1e49b791657146e8e27b15, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/987b460804914cbb9ee2f7a0061f5b1e] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=36.2 K 2024-12-12T19:34:10,136 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d2e030fb2ae348cc9acd7590d7a645c9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1734032046166 2024-12-12T19:34:10,137 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f0bab910a1e49b791657146e8e27b15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1734032047213 2024-12-12T19:34:10,137 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 987b460804914cbb9ee2f7a0061f5b1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734032047799 2024-12-12T19:34:10,137 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T19:34:10,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:10,143 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:10,143 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:10,143 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:34:10,143 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:10,144 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0208d4a8fe1141fcb0e3ac02af426395, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=100.2 K 2024-12-12T19:34:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:10,144 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:10,144 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0208d4a8fe1141fcb0e3ac02af426395, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2] 2024-12-12T19:34:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:10,147 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0208d4a8fe1141fcb0e3ac02af426395, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, 
seqNum=293, earliestPutTs=1734032046166 2024-12-12T19:34:10,151 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36549a9a991b46b794808b5bade71196, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1734032047213 2024-12-12T19:34:10,157 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c48ad20e01248aca68b5510be0926b2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734032047799 2024-12-12T19:34:10,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e29aafb8593740d2be9934ac3d8a1d43_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032048514/Put/seqid=0 2024-12-12T19:34:10,200 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:10,201 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/7d0f2f8ea9bf4395b207bb83ce0820d0 is 50, key is test_row_0/C:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:10,208 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:10,218 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212304ed7cf715b484589e3598a685c912a_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:10,220 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212304ed7cf715b484589e3598a685c912a_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:10,220 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212304ed7cf715b484589e3598a685c912a_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:10,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742211_1387 (size=12454) 2024-12-12T19:34:10,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:10,249 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e29aafb8593740d2be9934ac3d8a1d43_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e29aafb8593740d2be9934ac3d8a1d43_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:10,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/dd2db03aec384c57a67e981437ea32b9, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/dd2db03aec384c57a67e981437ea32b9 is 175, key is test_row_0/A:col10/1734032048514/Put/seqid=0 2024-12-12T19:34:10,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742212_1388 (size=12541) 2024-12-12T19:34:10,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742214_1390 (size=31255) 2024-12-12T19:34:10,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742213_1389 (size=4469) 2024-12-12T19:34:10,307 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=345, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/dd2db03aec384c57a67e981437ea32b9 2024-12-12T19:34:10,311 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#320 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:10,311 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0ae9e454b2064a628d2845a0a02c7473 is 175, key is test_row_0/A:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:10,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/efe982922232465fb4f6ae2ba52d3b5b is 50, key is test_row_0/B:col10/1734032048514/Put/seqid=0 2024-12-12T19:34:10,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742215_1391 (size=31495) 2024-12-12T19:34:10,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742216_1392 (size=12301) 2024-12-12T19:34:10,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:10,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:10,385 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/0ae9e454b2064a628d2845a0a02c7473 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0ae9e454b2064a628d2845a0a02c7473 2024-12-12T19:34:10,404 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 0ae9e454b2064a628d2845a0a02c7473(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:10,404 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:10,405 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=13, startTime=1734032050128; duration=0sec 2024-12-12T19:34:10,405 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:10,405 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:10,405 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:10,406 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:10,406 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:10,406 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:10,406 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f720e0c72c8f49d9bfa451da05d1fb2c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/063db593adcb4a55a1c226f7c70e9517, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/a246c2aed94c42edb8a63d5370195777] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=36.2 K 2024-12-12T19:34:10,406 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting f720e0c72c8f49d9bfa451da05d1fb2c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1734032046166 2024-12-12T19:34:10,407 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 063db593adcb4a55a1c226f7c70e9517, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1734032047213 2024-12-12T19:34:10,407 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a246c2aed94c42edb8a63d5370195777, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734032047799 2024-12-12T19:34:10,415 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#322 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:10,415 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8be9ae1a80a54d63adb7e8cc5f75b3e2 is 50, key is test_row_0/B:col10/1734032047799/Put/seqid=0 2024-12-12T19:34:10,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742217_1393 (size=12541) 2024-12-12T19:34:10,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032110489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032110491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032110597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032110600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,704 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/7d0f2f8ea9bf4395b207bb83ce0820d0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/7d0f2f8ea9bf4395b207bb83ce0820d0 2024-12-12T19:34:10,720 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into 7d0f2f8ea9bf4395b207bb83ce0820d0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:10,720 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:10,720 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=13, startTime=1734032050128; duration=0sec 2024-12-12T19:34:10,720 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:10,720 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:10,776 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/efe982922232465fb4f6ae2ba52d3b5b 2024-12-12T19:34:10,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032110775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/fbeefa76e032487680f7c7c8c39318f8 is 50, key is test_row_0/C:col10/1734032048514/Put/seqid=0 2024-12-12T19:34:10,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032110814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032110816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:10,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742218_1394 (size=12301) 2024-12-12T19:34:10,871 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/8be9ae1a80a54d63adb7e8cc5f75b3e2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8be9ae1a80a54d63adb7e8cc5f75b3e2 2024-12-12T19:34:10,877 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into 8be9ae1a80a54d63adb7e8cc5f75b3e2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:10,877 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:10,877 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=13, startTime=1734032050128; duration=0sec 2024-12-12T19:34:10,877 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:10,877 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:11,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032111124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032111124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,260 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/fbeefa76e032487680f7c7c8c39318f8 2024-12-12T19:34:11,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/dd2db03aec384c57a67e981437ea32b9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9 2024-12-12T19:34:11,295 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9, entries=150, sequenceid=345, filesize=30.5 K 2024-12-12T19:34:11,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/efe982922232465fb4f6ae2ba52d3b5b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/efe982922232465fb4f6ae2ba52d3b5b 2024-12-12T19:34:11,314 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/efe982922232465fb4f6ae2ba52d3b5b, entries=150, sequenceid=345, filesize=12.0 K 2024-12-12T19:34:11,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 
{event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/fbeefa76e032487680f7c7c8c39318f8 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fbeefa76e032487680f7c7c8c39318f8 2024-12-12T19:34:11,323 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fbeefa76e032487680f7c7c8c39318f8, entries=150, sequenceid=345, filesize=12.0 K 2024-12-12T19:34:11,326 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a329d898f2cbc923ae8747673ded3106 in 1182ms, sequenceid=345, compaction requested=false 2024-12-12T19:34:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-12T19:34:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-12T19:34:11,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-12T19:34:11,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0460 sec 2024-12-12T19:34:11,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 4.0560 sec 2024-12-12T19:34:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T19:34:11,392 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-12T19:34:11,396 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-12T19:34:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T19:34:11,400 
INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:11,400 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:11,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:11,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T19:34:11,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T19:34:11,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:11,556 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:34:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:11,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:11,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122dfb87e0707648fa94f9114a32c5f0cc_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032050489/Put/seqid=0 2024-12-12T19:34:11,611 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742219_1395 (size=12454) 2024-12-12T19:34:11,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:11,631 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122dfb87e0707648fa94f9114a32c5f0cc_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412122dfb87e0707648fa94f9114a32c5f0cc_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:11,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1598eb4056634a3fbbf66fb6e6f85aa7, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:11,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1598eb4056634a3fbbf66fb6e6f85aa7 is 175, key is test_row_0/A:col10/1734032050489/Put/seqid=0 2024-12-12T19:34:11,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:11,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
as already flushing 2024-12-12T19:34:11,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742220_1396 (size=31255) 2024-12-12T19:34:11,666 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1598eb4056634a3fbbf66fb6e6f85aa7 2024-12-12T19:34:11,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/005e6e92676243cd818fb1a15bc1fb9f is 50, key is test_row_0/B:col10/1734032050489/Put/seqid=0 2024-12-12T19:34:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T19:34:11,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032111723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:11,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032111727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742221_1397 (size=12301) 2024-12-12T19:34:11,754 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/005e6e92676243cd818fb1a15bc1fb9f 2024-12-12T19:34:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/30cdfe8c442b4840b4dcbe560e715645 is 50, key is test_row_0/C:col10/1734032050489/Put/seqid=0 2024-12-12T19:34:11,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742222_1398 (size=12301) 2024-12-12T19:34:11,821 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/30cdfe8c442b4840b4dcbe560e715645 2024-12-12T19:34:11,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032111831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:11,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032111840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:11,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/1598eb4056634a3fbbf66fb6e6f85aa7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7 2024-12-12T19:34:11,868 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7, entries=150, sequenceid=372, filesize=30.5 K 2024-12-12T19:34:11,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/005e6e92676243cd818fb1a15bc1fb9f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/005e6e92676243cd818fb1a15bc1fb9f 2024-12-12T19:34:11,886 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/005e6e92676243cd818fb1a15bc1fb9f, entries=150, sequenceid=372, filesize=12.0 K 2024-12-12T19:34:11,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/30cdfe8c442b4840b4dcbe560e715645 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cdfe8c442b4840b4dcbe560e715645 2024-12-12T19:34:11,911 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cdfe8c442b4840b4dcbe560e715645, entries=150, sequenceid=372, filesize=12.0 K 2024-12-12T19:34:11,912 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a329d898f2cbc923ae8747673ded3106 in 356ms, sequenceid=372, compaction requested=true 2024-12-12T19:34:11,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:11,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:11,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-12T19:34:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-12T19:34:11,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-12T19:34:11,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 518 msec 2024-12-12T19:34:11,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 536 msec 2024-12-12T19:34:12,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T19:34:12,014 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-12T19:34:12,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:12,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-12T19:34:12,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T19:34:12,030 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:12,036 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:12,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:12,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:12,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:34:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:12,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:12,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d43597fee5b64c18914e02123417787a_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742223_1399 (size=14994) 2024-12-12T19:34:12,104 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:12,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T19:34:12,127 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d43597fee5b64c18914e02123417787a_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d43597fee5b64c18914e02123417787a_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:12,131 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/340419c440484e6f81c08faf5ff855d6, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:12,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/340419c440484e6f81c08faf5ff855d6 is 175, key is 
test_row_0/A:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742224_1400 (size=39949) 2024-12-12T19:34:12,156 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=385, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/340419c440484e6f81c08faf5ff855d6 2024-12-12T19:34:12,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4ebfb0039af54be0a58b463ea0aba74c is 50, key is test_row_0/B:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032112160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032112158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,198 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T19:34:12,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:12,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:12,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:12,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:12,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:12,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:12,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742225_1401 (size=12301) 2024-12-12T19:34:12,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4ebfb0039af54be0a58b463ea0aba74c 2024-12-12T19:34:12,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/aecd4882de03411c80221e28a081476c is 50, key is test_row_0/C:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032112268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742226_1402 (size=12301) 2024-12-12T19:34:12,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/aecd4882de03411c80221e28a081476c 2024-12-12T19:34:12,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032112279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/340419c440484e6f81c08faf5ff855d6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6 2024-12-12T19:34:12,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6, entries=200, sequenceid=385, filesize=39.0 K 2024-12-12T19:34:12,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/4ebfb0039af54be0a58b463ea0aba74c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4ebfb0039af54be0a58b463ea0aba74c 2024-12-12T19:34:12,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4ebfb0039af54be0a58b463ea0aba74c, entries=150, sequenceid=385, filesize=12.0 K 2024-12-12T19:34:12,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/aecd4882de03411c80221e28a081476c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/aecd4882de03411c80221e28a081476c 2024-12-12T19:34:12,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T19:34:12,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/aecd4882de03411c80221e28a081476c, entries=150, sequenceid=385, filesize=12.0 K 
2024-12-12T19:34:12,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a329d898f2cbc923ae8747673ded3106 in 279ms, sequenceid=385, compaction requested=true 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:12,334 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:12,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:12,335 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:12,338 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49444 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:12,338 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:12,338 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:34:12,338 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:12,338 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:12,338 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0ae9e454b2064a628d2845a0a02c7473, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=130.8 K 2024-12-12T19:34:12,338 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:12,338 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0ae9e454b2064a628d2845a0a02c7473, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6] 2024-12-12T19:34:12,339 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:12,339 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8be9ae1a80a54d63adb7e8cc5f75b3e2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/efe982922232465fb4f6ae2ba52d3b5b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/005e6e92676243cd818fb1a15bc1fb9f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4ebfb0039af54be0a58b463ea0aba74c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=48.3 K 2024-12-12T19:34:12,339 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ae9e454b2064a628d2845a0a02c7473, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734032047799 2024-12-12T19:34:12,343 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8be9ae1a80a54d63adb7e8cc5f75b3e2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734032047799 2024-12-12T19:34:12,343 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd2db03aec384c57a67e981437ea32b9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1734032048514 2024-12-12T19:34:12,347 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting efe982922232465fb4f6ae2ba52d3b5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1734032048514 2024-12-12T19:34:12,348 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 005e6e92676243cd818fb1a15bc1fb9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734032050483 2024-12-12T19:34:12,348 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1598eb4056634a3fbbf66fb6e6f85aa7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734032050483 2024-12-12T19:34:12,348 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ebfb0039af54be0a58b463ea0aba74c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1734032051687 2024-12-12T19:34:12,349 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 340419c440484e6f81c08faf5ff855d6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1734032051687 2024-12-12T19:34:12,351 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 
2024-12-12T19:34:12,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:12,352 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T19:34:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a9d97f30ea40461e8586bc98718b32db_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032052158/Put/seqid=0 2024-12-12T19:34:12,392 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#331 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:12,392 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d9f353e318594116bf2204f0ebbffd8e is 50, key is test_row_0/B:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,416 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:12,438 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412124c5beea24e1e4f79b345a0040fc9e8ae_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:12,441 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412124c5beea24e1e4f79b345a0040fc9e8ae_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:12,441 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124c5beea24e1e4f79b345a0040fc9e8ae_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:12,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742227_1403 (size=12454) 2024-12-12T19:34:12,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742228_1404 (size=12677) 2024-12-12T19:34:12,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742229_1405 (size=4469) 2024-12-12T19:34:12,478 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d9f353e318594116bf2204f0ebbffd8e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d9f353e318594116bf2204f0ebbffd8e 2024-12-12T19:34:12,480 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#332 average throughput is 0.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:12,480 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/bcd131fa4be746d8a991e2902fb22013 is 175, key is test_row_0/A:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. as already flushing 2024-12-12T19:34:12,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:12,495 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into d9f353e318594116bf2204f0ebbffd8e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:12,495 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:12,495 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=12, startTime=1734032052334; duration=0sec 2024-12-12T19:34:12,495 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:12,495 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:12,495 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:12,507 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49444 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:12,507 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:12,508 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:12,508 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/7d0f2f8ea9bf4395b207bb83ce0820d0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fbeefa76e032487680f7c7c8c39318f8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cdfe8c442b4840b4dcbe560e715645, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/aecd4882de03411c80221e28a081476c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=48.3 K 2024-12-12T19:34:12,512 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d0f2f8ea9bf4395b207bb83ce0820d0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734032047799 2024-12-12T19:34:12,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742230_1406 (size=31631) 2024-12-12T19:34:12,513 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting fbeefa76e032487680f7c7c8c39318f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1734032048514 2024-12-12T19:34:12,516 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 30cdfe8c442b4840b4dcbe560e715645, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734032050483 2024-12-12T19:34:12,519 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting aecd4882de03411c80221e28a081476c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1734032051687 2024-12-12T19:34:12,534 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/bcd131fa4be746d8a991e2902fb22013 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/bcd131fa4be746d8a991e2902fb22013 2024-12-12T19:34:12,575 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#333 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:12,575 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/df73b5d63bac4b5cbe2344d00912f463 is 50, key is test_row_0/C:col10/1734032052045/Put/seqid=0 2024-12-12T19:34:12,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032112560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032112567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,587 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into bcd131fa4be746d8a991e2902fb22013(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:12,588 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:12,588 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=12, startTime=1734032052334; duration=0sec 2024-12-12T19:34:12,588 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:12,588 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:12,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742231_1407 (size=12677) 2024-12-12T19:34:12,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T19:34:12,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032112680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032112695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1734032112790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,791 DEBUG [Thread-1364 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4190 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:12,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:12,865 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a9d97f30ea40461e8586bc98718b32db_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a9d97f30ea40461e8586bc98718b32db_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:12,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/d08818d5303b4b509f48b4d466c88823, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:12,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/d08818d5303b4b509f48b4d466c88823 is 175, key is test_row_0/A:col10/1734032052158/Put/seqid=0 2024-12-12T19:34:12,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742232_1408 (size=31255) 2024-12-12T19:34:12,882 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=408, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/d08818d5303b4b509f48b4d466c88823 2024-12-12T19:34:12,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032112889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d52e78eccf0243448f8a36e8fa8cf5cc is 50, key is test_row_0/B:col10/1734032052158/Put/seqid=0 2024-12-12T19:34:12,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:12,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032112909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:12,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742233_1409 (size=12301) 2024-12-12T19:34:13,009 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/df73b5d63bac4b5cbe2344d00912f463 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/df73b5d63bac4b5cbe2344d00912f463 2024-12-12T19:34:13,013 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into df73b5d63bac4b5cbe2344d00912f463(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:13,013 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:13,013 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=12, startTime=1734032052334; duration=0sec 2024-12-12T19:34:13,014 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:13,014 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T19:34:13,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:13,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032113196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:13,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032113211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:13,345 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d52e78eccf0243448f8a36e8fa8cf5cc 2024-12-12T19:34:13,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/45aea74a9a9d48189889c86eca508718 is 50, key is test_row_0/C:col10/1734032052158/Put/seqid=0 2024-12-12T19:34:13,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742234_1410 (size=12301) 2024-12-12T19:34:13,676 DEBUG [Thread-1375 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59bd764a to 127.0.0.1:52216 2024-12-12T19:34:13,676 DEBUG [Thread-1375 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,680 DEBUG [Thread-1377 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:52216 2024-12-12T19:34:13,680 DEBUG [Thread-1373 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3eab689a to 127.0.0.1:52216 2024-12-12T19:34:13,680 DEBUG [Thread-1377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,680 DEBUG [Thread-1373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,685 DEBUG [Thread-1371 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68035c67 to 127.0.0.1:52216 2024-12-12T19:34:13,685 DEBUG [Thread-1371 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,695 DEBUG [Thread-1379 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:52216 2024-12-12T19:34:13,695 DEBUG [Thread-1379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:13,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1734032113703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:13,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41090 deadline: 1734032113727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:13,820 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/45aea74a9a9d48189889c86eca508718 2024-12-12T19:34:13,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/d08818d5303b4b509f48b4d466c88823 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823 2024-12-12T19:34:13,827 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823, entries=150, sequenceid=408, filesize=30.5 K 2024-12-12T19:34:13,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/d52e78eccf0243448f8a36e8fa8cf5cc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d52e78eccf0243448f8a36e8fa8cf5cc 2024-12-12T19:34:13,836 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d52e78eccf0243448f8a36e8fa8cf5cc, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T19:34:13,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/45aea74a9a9d48189889c86eca508718 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/45aea74a9a9d48189889c86eca508718 2024-12-12T19:34:13,845 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/45aea74a9a9d48189889c86eca508718, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T19:34:13,855 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for a329d898f2cbc923ae8747673ded3106 in 1503ms, sequenceid=408, compaction requested=false 2024-12-12T19:34:13,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:13,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:13,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-12T19:34:13,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-12T19:34:13,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-12T19:34:13,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8210 sec 2024-12-12T19:34:13,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.8430 sec 2024-12-12T19:34:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:13,965 DEBUG [Thread-1362 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:52216 2024-12-12T19:34:13,965 DEBUG [Thread-1362 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T19:34:13,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:13,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:13,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:13,966 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:13,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:13,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:13,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120bef41a503194cff8418bd04b6899719_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_0/A:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:13,979 DEBUG [Thread-1360 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:52216 2024-12-12T19:34:13,979 DEBUG [Thread-1360 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:13,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742235_1411 (size=12454) 2024-12-12T19:34:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T19:34:14,132 INFO [Thread-1370 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-12T19:34:14,383 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:14,387 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120bef41a503194cff8418bd04b6899719_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120bef41a503194cff8418bd04b6899719_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:14,388 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/74a9ed1a1eb04bbebc6ac67e29fc7d52, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:14,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/74a9ed1a1eb04bbebc6ac67e29fc7d52 is 175, key is test_row_0/A:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:14,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742236_1412 (size=31255) 2024-12-12T19:34:14,719 DEBUG [Thread-1366 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:52216 2024-12-12T19:34:14,719 DEBUG [Thread-1366 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:14,739 DEBUG [Thread-1368 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x7819b9e2 to 127.0.0.1:52216 2024-12-12T19:34:14,739 DEBUG [Thread-1368 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:14,809 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=426, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/74a9ed1a1eb04bbebc6ac67e29fc7d52 2024-12-12T19:34:14,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/cd4ef3d9641047c7b72977f0c3d45aa4 is 50, key is test_row_0/B:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:14,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742237_1413 (size=12301) 2024-12-12T19:34:14,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/cd4ef3d9641047c7b72977f0c3d45aa4 2024-12-12T19:34:14,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/660d5f556fdc4933a954e64a487a0202 is 50, key is test_row_0/C:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:14,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742238_1414 (size=12301) 2024-12-12T19:34:14,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/660d5f556fdc4933a954e64a487a0202 2024-12-12T19:34:14,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/74a9ed1a1eb04bbebc6ac67e29fc7d52 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52 2024-12-12T19:34:14,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52, entries=150, sequenceid=426, filesize=30.5 K 2024-12-12T19:34:14,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/cd4ef3d9641047c7b72977f0c3d45aa4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/cd4ef3d9641047c7b72977f0c3d45aa4 
2024-12-12T19:34:14,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/cd4ef3d9641047c7b72977f0c3d45aa4, entries=150, sequenceid=426, filesize=12.0 K 2024-12-12T19:34:14,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/660d5f556fdc4933a954e64a487a0202 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/660d5f556fdc4933a954e64a487a0202 2024-12-12T19:34:14,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/660d5f556fdc4933a954e64a487a0202, entries=150, sequenceid=426, filesize=12.0 K 2024-12-12T19:34:15,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=20.13 KB/20610 for a329d898f2cbc923ae8747673ded3106 in 1038ms, sequenceid=426, compaction requested=true 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a329d898f2cbc923ae8747673ded3106:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:15,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:34:15,005 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:15,007 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:15,008 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37279 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:15,008 DEBUG 
[RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/C is initiating minor compaction (all files) 2024-12-12T19:34:15,008 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/C in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:15,008 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/df73b5d63bac4b5cbe2344d00912f463, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/45aea74a9a9d48189889c86eca508718, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/660d5f556fdc4933a954e64a487a0202] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=36.4 K 2024-12-12T19:34:15,009 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting df73b5d63bac4b5cbe2344d00912f463, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1734032051687 2024-12-12T19:34:15,009 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45aea74a9a9d48189889c86eca508718, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734032052100 2024-12-12T19:34:15,009 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 660d5f556fdc4933a954e64a487a0202, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1734032052550 2024-12-12T19:34:15,013 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:15,013 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/A is initiating minor compaction (all files) 2024-12-12T19:34:15,013 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/A in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:15,013 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/bcd131fa4be746d8a991e2902fb22013, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=91.9 K 2024-12-12T19:34:15,013 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:15,013 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/bcd131fa4be746d8a991e2902fb22013, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52] 2024-12-12T19:34:15,016 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd131fa4be746d8a991e2902fb22013, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1734032051687 2024-12-12T19:34:15,019 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#C#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:15,020 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/0fbac77d6dbd490fb4581937f27fd045 is 50, key is test_row_0/C:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:15,020 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d08818d5303b4b509f48b4d466c88823, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734032052100 2024-12-12T19:34:15,024 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 74a9ed1a1eb04bbebc6ac67e29fc7d52, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1734032052550 2024-12-12T19:34:15,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742239_1415 (size=12779) 2024-12-12T19:34:15,084 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:15,086 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/0fbac77d6dbd490fb4581937f27fd045 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/0fbac77d6dbd490fb4581937f27fd045 2024-12-12T19:34:15,090 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412123c0c797278914d4ebfcdd45ebac3475e_a329d898f2cbc923ae8747673ded3106 store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:15,092 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412123c0c797278914d4ebfcdd45ebac3475e_a329d898f2cbc923ae8747673ded3106, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:15,092 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123c0c797278914d4ebfcdd45ebac3475e_a329d898f2cbc923ae8747673ded3106 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:15,102 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/C of a329d898f2cbc923ae8747673ded3106 into 0fbac77d6dbd490fb4581937f27fd045(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:15,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:15,103 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/C, priority=13, startTime=1734032055004; duration=0sec 2024-12-12T19:34:15,103 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:15,103 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:C 2024-12-12T19:34:15,103 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:15,119 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37279 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:15,119 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): a329d898f2cbc923ae8747673ded3106/B is initiating minor compaction (all files) 2024-12-12T19:34:15,119 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a329d898f2cbc923ae8747673ded3106/B in TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:15,120 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d9f353e318594116bf2204f0ebbffd8e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d52e78eccf0243448f8a36e8fa8cf5cc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/cd4ef3d9641047c7b72977f0c3d45aa4] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp, totalSize=36.4 K 2024-12-12T19:34:15,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742240_1416 (size=4469) 2024-12-12T19:34:15,124 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9f353e318594116bf2204f0ebbffd8e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1734032051687 2024-12-12T19:34:15,129 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d52e78eccf0243448f8a36e8fa8cf5cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734032052100 2024-12-12T19:34:15,135 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd4ef3d9641047c7b72977f0c3d45aa4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=426, earliestPutTs=1734032052550 2024-12-12T19:34:15,220 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#B#compaction#341 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:15,221 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/50b75f383df64f25bf5c8a1444307af2 is 50, key is test_row_0/B:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:15,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742241_1417 (size=12779) 2024-12-12T19:34:15,343 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/50b75f383df64f25bf5c8a1444307af2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/50b75f383df64f25bf5c8a1444307af2 2024-12-12T19:34:15,390 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/B of a329d898f2cbc923ae8747673ded3106 into 50b75f383df64f25bf5c8a1444307af2(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:15,390 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:15,390 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/B, priority=13, startTime=1734032055004; duration=0sec 2024-12-12T19:34:15,390 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:15,390 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:B 2024-12-12T19:34:15,539 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a329d898f2cbc923ae8747673ded3106#A#compaction#340 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:15,539 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/00732a1b4cc1440d8ae0a83ec18b4380 is 175, key is test_row_0/A:col10/1734032052551/Put/seqid=0 2024-12-12T19:34:15,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742242_1418 (size=31733) 2024-12-12T19:34:15,609 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/00732a1b4cc1440d8ae0a83ec18b4380 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/00732a1b4cc1440d8ae0a83ec18b4380 2024-12-12T19:34:15,663 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a329d898f2cbc923ae8747673ded3106/A of a329d898f2cbc923ae8747673ded3106 into 00732a1b4cc1440d8ae0a83ec18b4380(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:15,663 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:15,663 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106., storeName=a329d898f2cbc923ae8747673ded3106/A, priority=13, startTime=1734032055004; duration=0sec 2024-12-12T19:34:15,663 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:15,664 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a329d898f2cbc923ae8747673ded3106:A 2024-12-12T19:34:16,819 DEBUG [Thread-1364 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:52216 2024-12-12T19:34:16,819 DEBUG [Thread-1364 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 11 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 133 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1226 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3678 rows 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1237 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3711 rows 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1212 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3636 rows 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1205 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3615 rows 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1216 2024-12-12T19:34:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3648 rows 2024-12-12T19:34:16,820 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:34:16,820 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:52216 2024-12-12T19:34:16,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:16,843 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T19:34:16,844 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T19:34:16,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:16,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T19:34:16,861 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032056861"}]},"ts":"1734032056861"} 2024-12-12T19:34:16,868 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T19:34:16,906 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T19:34:16,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:34:16,908 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, UNASSIGN}] 2024-12-12T19:34:16,909 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, UNASSIGN 2024-12-12T19:34:16,910 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:16,916 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:34:16,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; CloseRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:34:16,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T19:34:17,068 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:17,068 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(124): Close a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:17,068 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:34:17,068 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1681): Closing a329d898f2cbc923ae8747673ded3106, disabling compactions & flushes 2024-12-12T19:34:17,068 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:17,068 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:17,068 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. after waiting 0 ms 2024-12-12T19:34:17,068 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 
2024-12-12T19:34:17,069 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2837): Flushing a329d898f2cbc923ae8747673ded3106 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T19:34:17,069 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=A 2024-12-12T19:34:17,069 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:17,069 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=B 2024-12-12T19:34:17,069 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:17,069 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a329d898f2cbc923ae8747673ded3106, store=C 2024-12-12T19:34:17,069 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:17,078 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126eab50af3e6841faa4bcb6f1859fb490_a329d898f2cbc923ae8747673ded3106 is 50, key is test_row_1/A:col10/1734032056818/Put/seqid=0 2024-12-12T19:34:17,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742243_1419 (size=9914) 2024-12-12T19:34:17,131 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T19:34:17,180 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126eab50af3e6841faa4bcb6f1859fb490_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eab50af3e6841faa4bcb6f1859fb490_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:17,191 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/cd2350fb2e8c4b2ea44e91b0d911a917, store: [table=TestAcidGuarantees family=A region=a329d898f2cbc923ae8747673ded3106] 2024-12-12T19:34:17,192 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/cd2350fb2e8c4b2ea44e91b0d911a917 is 175, key is test_row_1/A:col10/1734032056818/Put/seqid=0 2024-12-12T19:34:17,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742244_1420 (size=22561) 2024-12-12T19:34:17,231 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=436, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/cd2350fb2e8c4b2ea44e91b0d911a917 2024-12-12T19:34:17,285 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/fe8349e12c3e4c5e9a65f1644266b5a0 is 50, key is test_row_1/B:col10/1734032056818/Put/seqid=0 2024-12-12T19:34:17,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742245_1421 (size=9857) 2024-12-12T19:34:17,318 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/fe8349e12c3e4c5e9a65f1644266b5a0 2024-12-12T19:34:17,343 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/690f62b6b88d4081884f24a49a7cb330 is 50, key is test_row_1/C:col10/1734032056818/Put/seqid=0 2024-12-12T19:34:17,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742246_1422 (size=9857) 2024-12-12T19:34:17,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T19:34:17,770 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/690f62b6b88d4081884f24a49a7cb330 2024-12-12T19:34:17,799 DEBUG 
[RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/A/cd2350fb2e8c4b2ea44e91b0d911a917 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/cd2350fb2e8c4b2ea44e91b0d911a917 2024-12-12T19:34:17,840 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/cd2350fb2e8c4b2ea44e91b0d911a917, entries=100, sequenceid=436, filesize=22.0 K 2024-12-12T19:34:17,846 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/B/fe8349e12c3e4c5e9a65f1644266b5a0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/fe8349e12c3e4c5e9a65f1644266b5a0 2024-12-12T19:34:17,891 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/fe8349e12c3e4c5e9a65f1644266b5a0, entries=100, sequenceid=436, filesize=9.6 K 2024-12-12T19:34:17,896 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/.tmp/C/690f62b6b88d4081884f24a49a7cb330 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/690f62b6b88d4081884f24a49a7cb330 2024-12-12T19:34:17,915 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/690f62b6b88d4081884f24a49a7cb330, entries=100, sequenceid=436, filesize=9.6 K 2024-12-12T19:34:17,926 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for a329d898f2cbc923ae8747673ded3106 in 857ms, sequenceid=436, compaction requested=false 2024-12-12T19:34:17,927 DEBUG [StoreCloser-TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/14e1ded481d54e8cbf5f42694a48e394, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0208d4a8fe1141fcb0e3ac02af426395, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0ae9e454b2064a628d2845a0a02c7473, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/bcd131fa4be746d8a991e2902fb22013, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52] to archive 2024-12-12T19:34:17,928 DEBUG [StoreCloser-TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T19:34:17,947 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/e297cb5c6fe441c4bcb828a3db8a8825 2024-12-12T19:34:17,947 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/36549a9a991b46b794808b5bade71196 2024-12-12T19:34:17,947 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1c48ad20e01248aca68b5510be0926b2 2024-12-12T19:34:17,947 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0ae9e454b2064a628d2845a0a02c7473 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0ae9e454b2064a628d2845a0a02c7473 2024-12-12T19:34:17,948 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/61986d3b7ef34a94bc956d82e8375f62 2024-12-12T19:34:17,948 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/14e1ded481d54e8cbf5f42694a48e394 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/14e1ded481d54e8cbf5f42694a48e394 2024-12-12T19:34:17,948 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0208d4a8fe1141fcb0e3ac02af426395 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/0208d4a8fe1141fcb0e3ac02af426395 2024-12-12T19:34:17,948 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/22445e28227341589b9f1e481af057eb 2024-12-12T19:34:17,950 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/340419c440484e6f81c08faf5ff855d6 2024-12-12T19:34:17,951 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/d08818d5303b4b509f48b4d466c88823 2024-12-12T19:34:17,955 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/bcd131fa4be746d8a991e2902fb22013 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/bcd131fa4be746d8a991e2902fb22013 2024-12-12T19:34:17,955 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/1598eb4056634a3fbbf66fb6e6f85aa7 2024-12-12T19:34:17,955 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/dd2db03aec384c57a67e981437ea32b9 2024-12-12T19:34:17,956 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/74a9ed1a1eb04bbebc6ac67e29fc7d52 2024-12-12T19:34:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T19:34:17,983 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/28ddea329f954379a5390b3df11db9fa, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/75c70efba2a242628dd704f28a55e11f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4d5879440c094209aa3960c93e77d22d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f720e0c72c8f49d9bfa451da05d1fb2c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/758da933b6a74b80b9ce725bd1d2ecf0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/063db593adcb4a55a1c226f7c70e9517, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8be9ae1a80a54d63adb7e8cc5f75b3e2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/a246c2aed94c42edb8a63d5370195777, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/efe982922232465fb4f6ae2ba52d3b5b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/005e6e92676243cd818fb1a15bc1fb9f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d9f353e318594116bf2204f0ebbffd8e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4ebfb0039af54be0a58b463ea0aba74c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d52e78eccf0243448f8a36e8fa8cf5cc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/cd4ef3d9641047c7b72977f0c3d45aa4] to archive 2024-12-12T19:34:17,990 DEBUG [StoreCloser-TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T19:34:17,996 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8be9ae1a80a54d63adb7e8cc5f75b3e2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/8be9ae1a80a54d63adb7e8cc5f75b3e2 2024-12-12T19:34:17,996 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/758da933b6a74b80b9ce725bd1d2ecf0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/758da933b6a74b80b9ce725bd1d2ecf0 2024-12-12T19:34:17,996 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/75c70efba2a242628dd704f28a55e11f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/75c70efba2a242628dd704f28a55e11f 2024-12-12T19:34:17,996 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/28ddea329f954379a5390b3df11db9fa to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/28ddea329f954379a5390b3df11db9fa 2024-12-12T19:34:17,997 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/063db593adcb4a55a1c226f7c70e9517 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/063db593adcb4a55a1c226f7c70e9517 2024-12-12T19:34:17,997 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f720e0c72c8f49d9bfa451da05d1fb2c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/f720e0c72c8f49d9bfa451da05d1fb2c 2024-12-12T19:34:17,997 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/a246c2aed94c42edb8a63d5370195777 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/a246c2aed94c42edb8a63d5370195777 2024-12-12T19:34:17,997 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4d5879440c094209aa3960c93e77d22d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4d5879440c094209aa3960c93e77d22d 2024-12-12T19:34:18,006 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/efe982922232465fb4f6ae2ba52d3b5b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/efe982922232465fb4f6ae2ba52d3b5b 2024-12-12T19:34:18,006 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4ebfb0039af54be0a58b463ea0aba74c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/4ebfb0039af54be0a58b463ea0aba74c 2024-12-12T19:34:18,006 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d52e78eccf0243448f8a36e8fa8cf5cc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d52e78eccf0243448f8a36e8fa8cf5cc 2024-12-12T19:34:18,008 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d9f353e318594116bf2204f0ebbffd8e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/d9f353e318594116bf2204f0ebbffd8e 2024-12-12T19:34:18,011 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/005e6e92676243cd818fb1a15bc1fb9f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/005e6e92676243cd818fb1a15bc1fb9f 2024-12-12T19:34:18,011 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/cd4ef3d9641047c7b72977f0c3d45aa4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/cd4ef3d9641047c7b72977f0c3d45aa4 2024-12-12T19:34:18,012 DEBUG [StoreCloser-TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/a79481e105be4ea7a6562afa90c590b7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3d15302c329e4798b1b8f827f9d5614a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3e58d87bdf564a4b8039e1d8e2ae0354, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/d2e030fb2ae348cc9acd7590d7a645c9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/06228e77284247d4aac9893cf8b99f9b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/9f0bab910a1e49b791657146e8e27b15, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/7d0f2f8ea9bf4395b207bb83ce0820d0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/987b460804914cbb9ee2f7a0061f5b1e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fbeefa76e032487680f7c7c8c39318f8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cdfe8c442b4840b4dcbe560e715645, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/df73b5d63bac4b5cbe2344d00912f463, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/aecd4882de03411c80221e28a081476c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/45aea74a9a9d48189889c86eca508718, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/660d5f556fdc4933a954e64a487a0202] to archive 2024-12-12T19:34:18,016 DEBUG [StoreCloser-TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T19:34:18,036 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/a79481e105be4ea7a6562afa90c590b7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/a79481e105be4ea7a6562afa90c590b7 2024-12-12T19:34:18,036 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3d15302c329e4798b1b8f827f9d5614a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3d15302c329e4798b1b8f827f9d5614a 2024-12-12T19:34:18,037 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3e58d87bdf564a4b8039e1d8e2ae0354 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/3e58d87bdf564a4b8039e1d8e2ae0354 2024-12-12T19:34:18,044 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/d2e030fb2ae348cc9acd7590d7a645c9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/d2e030fb2ae348cc9acd7590d7a645c9 2024-12-12T19:34:18,045 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/9f0bab910a1e49b791657146e8e27b15 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/9f0bab910a1e49b791657146e8e27b15 2024-12-12T19:34:18,045 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/06228e77284247d4aac9893cf8b99f9b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/06228e77284247d4aac9893cf8b99f9b 2024-12-12T19:34:18,045 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/987b460804914cbb9ee2f7a0061f5b1e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/987b460804914cbb9ee2f7a0061f5b1e 2024-12-12T19:34:18,045 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/7d0f2f8ea9bf4395b207bb83ce0820d0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/7d0f2f8ea9bf4395b207bb83ce0820d0 2024-12-12T19:34:18,055 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fbeefa76e032487680f7c7c8c39318f8 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/fbeefa76e032487680f7c7c8c39318f8 2024-12-12T19:34:18,055 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cdfe8c442b4840b4dcbe560e715645 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/30cdfe8c442b4840b4dcbe560e715645 2024-12-12T19:34:18,055 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/df73b5d63bac4b5cbe2344d00912f463 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/df73b5d63bac4b5cbe2344d00912f463 2024-12-12T19:34:18,059 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/660d5f556fdc4933a954e64a487a0202 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/660d5f556fdc4933a954e64a487a0202 2024-12-12T19:34:18,066 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/45aea74a9a9d48189889c86eca508718 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/45aea74a9a9d48189889c86eca508718 2024-12-12T19:34:18,070 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/aecd4882de03411c80221e28a081476c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/aecd4882de03411c80221e28a081476c 2024-12-12T19:34:18,134 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/recovered.edits/439.seqid, newMaxSeqId=439, maxSeqId=4 2024-12-12T19:34:18,140 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106. 2024-12-12T19:34:18,140 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1635): Region close journal for a329d898f2cbc923ae8747673ded3106: 2024-12-12T19:34:18,143 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=a329d898f2cbc923ae8747673ded3106, regionState=CLOSED 2024-12-12T19:34:18,147 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(170): Closed a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:18,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-12T19:34:18,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; CloseRegionProcedure a329d898f2cbc923ae8747673ded3106, server=4c9c438b6eeb,42689,1734031923038 in 1.2410 sec 2024-12-12T19:34:18,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-12-12T19:34:18,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a329d898f2cbc923ae8747673ded3106, UNASSIGN in 1.2510 sec 2024-12-12T19:34:18,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-12T19:34:18,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.2540 sec 2024-12-12T19:34:18,164 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032058164"}]},"ts":"1734032058164"} 2024-12-12T19:34:18,168 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T19:34:18,196 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T19:34:18,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.3520 sec 2024-12-12T19:34:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T19:34:18,966 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-12T19:34:18,966 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T19:34:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:18,968 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:18,968 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=114, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T19:34:18,987 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,030 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/recovered.edits] 2024-12-12T19:34:19,036 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/00732a1b4cc1440d8ae0a83ec18b4380 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/00732a1b4cc1440d8ae0a83ec18b4380 2024-12-12T19:34:19,038 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/cd2350fb2e8c4b2ea44e91b0d911a917 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/A/cd2350fb2e8c4b2ea44e91b0d911a917 2024-12-12T19:34:19,050 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/50b75f383df64f25bf5c8a1444307af2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/50b75f383df64f25bf5c8a1444307af2 2024-12-12T19:34:19,050 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/fe8349e12c3e4c5e9a65f1644266b5a0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/B/fe8349e12c3e4c5e9a65f1644266b5a0 2024-12-12T19:34:19,070 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T19:34:19,071 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/690f62b6b88d4081884f24a49a7cb330 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/690f62b6b88d4081884f24a49a7cb330 2024-12-12T19:34:19,075 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/0fbac77d6dbd490fb4581937f27fd045 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/C/0fbac77d6dbd490fb4581937f27fd045 2024-12-12T19:34:19,114 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/recovered.edits/439.seqid to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106/recovered.edits/439.seqid 2024-12-12T19:34:19,115 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,119 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T19:34:19,124 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T19:34:19,132 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T19:34:19,191 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121202779f143fd540c8970736e22c7f0bfc_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121202779f143fd540c8970736e22c7f0bfc_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,192 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120bef41a503194cff8418bd04b6899719_a329d898f2cbc923ae8747673ded3106 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120bef41a503194cff8418bd04b6899719_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,194 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120f01154904504d6d9f1d66c13d60e928_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120f01154904504d6d9f1d66c13d60e928_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,195 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412122dfb87e0707648fa94f9114a32c5f0cc_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412122dfb87e0707648fa94f9114a32c5f0cc_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,200 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121236d1d52fd13f44c3afb5883e5a4807cf_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121236d1d52fd13f44c3afb5883e5a4807cf_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,201 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121254e5e5c671234422bb4bc9ee86984966_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121254e5e5c671234422bb4bc9ee86984966_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,202 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212664c9d06bcca4dceba8d77e7c1887099_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212664c9d06bcca4dceba8d77e7c1887099_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,202 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125a6f2e184a3641319cf74be8d0d9381a_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125a6f2e184a3641319cf74be8d0d9381a_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,202 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eab50af3e6841faa4bcb6f1859fb490_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eab50af3e6841faa4bcb6f1859fb490_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,202 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212669f719f868d4c8baea1ba71942b1fae_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212669f719f868d4c8baea1ba71942b1fae_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,202 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121224eaf5865f6742d9a46259081c210bd0_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121224eaf5865f6742d9a46259081c210bd0_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,205 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121270ffc173ecbb431a81e3990c12711933_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121270ffc173ecbb431a81e3990c12711933_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,205 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121279e4cbeea45842c08413d39c0847692d_a329d898f2cbc923ae8747673ded3106 to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121279e4cbeea45842c08413d39c0847692d_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,206 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a38788adb5214ff98f6b106499ad5fb8_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a38788adb5214ff98f6b106499ad5fb8_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,206 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cca452b599ec4432aaf209dcac24c3d8_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cca452b599ec4432aaf209dcac24c3d8_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,206 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128c3990fda2e84542adbe7f79ceb49ddb_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128c3990fda2e84542adbe7f79ceb49ddb_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,207 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212191c276daaff47c1a2004bb3873309a8_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212191c276daaff47c1a2004bb3873309a8_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,207 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a9d97f30ea40461e8586bc98718b32db_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a9d97f30ea40461e8586bc98718b32db_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,209 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e29aafb8593740d2be9934ac3d8a1d43_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e29aafb8593740d2be9934ac3d8a1d43_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,209 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c4d4501cd95e4bd0a60aedd796451739_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c4d4501cd95e4bd0a60aedd796451739_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,209 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d43597fee5b64c18914e02123417787a_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d43597fee5b64c18914e02123417787a_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,209 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ef478ca5ade14c638608656110753e17_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ef478ca5ade14c638608656110753e17_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,210 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cd2770e73d7342a0a7c78d6b0b3464a2_a329d898f2cbc923ae8747673ded3106 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cd2770e73d7342a0a7c78d6b0b3464a2_a329d898f2cbc923ae8747673ded3106 2024-12-12T19:34:19,210 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T19:34:19,221 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=114, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure 
table=TestAcidGuarantees 2024-12-12T19:34:19,236 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T19:34:19,250 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T19:34:19,259 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=114, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:19,259 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T19:34:19,259 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734032059259"}]},"ts":"9223372036854775807"} 2024-12-12T19:34:19,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T19:34:19,279 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T19:34:19,279 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a329d898f2cbc923ae8747673ded3106, NAME => 'TestAcidGuarantees,,1734032031131.a329d898f2cbc923ae8747673ded3106.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T19:34:19,279 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T19:34:19,280 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734032059279"}]},"ts":"9223372036854775807"} 2024-12-12T19:34:19,285 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T19:34:19,320 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=114, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:19,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 354 msec 2024-12-12T19:34:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T19:34:19,572 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-12T19:34:19,593 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=246 (was 243) - Thread LEAK? -, OpenFileDescriptor=455 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1476 (was 1421) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8042 (was 8508) 2024-12-12T19:34:19,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=246, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=1476, ProcessCount=11, AvailableMemoryMB=8038 2024-12-12T19:34:19,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T19:34:19,632 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:34:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:19,637 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:34:19,638 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:19,638 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 115 2024-12-12T19:34:19,640 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:34:19,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T19:34:19,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742247_1423 (size=963) 2024-12-12T19:34:19,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T19:34:19,944 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T19:34:20,111 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:34:20,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742248_1424 (size=53) 2024-12-12T19:34:20,188 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:34:20,188 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d2250407ef6b64ed659c4133a7c4d89c, disabling compactions & flushes 2024-12-12T19:34:20,188 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:20,188 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:20,188 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. after waiting 0 ms 2024-12-12T19:34:20,188 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:20,189 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:20,189 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:20,190 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:34:20,190 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734032060190"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734032060190"}]},"ts":"1734032060190"} 2024-12-12T19:34:20,200 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T19:34:20,201 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:34:20,202 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032060202"}]},"ts":"1734032060202"} 2024-12-12T19:34:20,208 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T19:34:20,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T19:34:20,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, ASSIGN}] 2024-12-12T19:34:20,276 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, ASSIGN 2024-12-12T19:34:20,279 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:34:20,430 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=d2250407ef6b64ed659c4133a7c4d89c, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:20,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; OpenRegionProcedure d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:34:20,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:20,587 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:20,588 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7285): Opening region: {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:34:20,589 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,589 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:34:20,589 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7327): checking encryption for d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,589 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7330): checking classloading for d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,591 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,593 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:20,593 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2250407ef6b64ed659c4133a7c4d89c columnFamilyName A 2024-12-12T19:34:20,593 DEBUG [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:20,594 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.HStore(327): Store=d2250407ef6b64ed659c4133a7c4d89c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:20,594 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,597 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:20,597 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2250407ef6b64ed659c4133a7c4d89c columnFamilyName B 2024-12-12T19:34:20,597 DEBUG [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:20,600 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.HStore(327): Store=d2250407ef6b64ed659c4133a7c4d89c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:20,600 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,601 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:20,602 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2250407ef6b64ed659c4133a7c4d89c columnFamilyName C 2024-12-12T19:34:20,602 DEBUG [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:20,602 INFO [StoreOpener-d2250407ef6b64ed659c4133a7c4d89c-1 {}] regionserver.HStore(327): Store=d2250407ef6b64ed659c4133a7c4d89c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:20,603 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:20,604 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,605 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,609 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:34:20,610 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1085): writing seq id for d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:20,612 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:34:20,613 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1102): Opened d2250407ef6b64ed659c4133a7c4d89c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62669402, jitterRate=-0.0661531388759613}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:34:20,614 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1001): Region open journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:20,615 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., pid=117, masterSystemTime=1734032060583 2024-12-12T19:34:20,624 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:20,624 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:20,625 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=d2250407ef6b64ed659c4133a7c4d89c, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:20,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-12T19:34:20,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; OpenRegionProcedure d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 in 206 msec 2024-12-12T19:34:20,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-12T19:34:20,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, ASSIGN in 373 msec 2024-12-12T19:34:20,644 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:34:20,644 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032060644"}]},"ts":"1734032060644"} 2024-12-12T19:34:20,656 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T19:34:20,678 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:34:20,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.0460 sec 2024-12-12T19:34:20,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-12T19:34:20,747 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 115 completed 2024-12-12T19:34:20,748 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-12-12T19:34:20,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:20,782 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:20,786 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:20,789 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T19:34:20,790 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42228, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:34:20,792 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-12-12T19:34:20,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:20,804 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-12-12T19:34:20,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:20,834 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-12-12T19:34:20,873 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:20,875 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-12-12T19:34:20,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:20,904 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x184771cf to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51196534 2024-12-12T19:34:20,953 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:20,954 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc5e114 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d49886 2024-12-12T19:34:21,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73d92042, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:21,007 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e96b8ad to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@635b1751 2024-12-12T19:34:21,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@593af048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:21,036 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17e5a47d to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cbfd84f 2024-12-12T19:34:21,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2209c520, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:21,092 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-12-12T19:34:21,131 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:21,134 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-12-12T19:34:21,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:21,163 DEBUG [hconnection-0x8b32d56-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:21,164 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-12T19:34:21,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 
2024-12-12T19:34:21,166 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:21,166 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:21,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:21,167 DEBUG [hconnection-0x524bff33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,169 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,170 DEBUG [hconnection-0x5ec32a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,170 DEBUG [hconnection-0x7222a51b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,171 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,171 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:21,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T19:34:21,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:21,180 DEBUG [hconnection-0xa302383-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,182 DEBUG [hconnection-0x211a9e92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,182 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,185 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,193 DEBUG [hconnection-0x7572ca04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,195 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,198 DEBUG [hconnection-0x79ba3b3f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,199 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,203 DEBUG [hconnection-0x17e306d8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,204 DEBUG [hconnection-0x54c5e52e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:21,205 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,206 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:21,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032121222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032121225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032121227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032121238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032121238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/cb801cf7329f4663a2df75d31c49d3a6 is 50, key is test_row_0/A:col10/1734032061177/Put/seqid=0 2024-12-12T19:34:21,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T19:34:21,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742249_1425 (size=12001) 2024-12-12T19:34:21,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/cb801cf7329f4663a2df75d31c49d3a6 2024-12-12T19:34:21,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T19:34:21,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:21,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:21,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:21,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032121332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032121335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032121340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032121346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032121351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/f1c2a04fed9b41d2a2875db872a2f989 is 50, key is test_row_0/B:col10/1734032061177/Put/seqid=0 2024-12-12T19:34:21,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742250_1426 (size=12001) 2024-12-12T19:34:21,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/f1c2a04fed9b41d2a2875db872a2f989 2024-12-12T19:34:21,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/540b897376174d4e803f497e6f8b7958 is 50, key is test_row_0/C:col10/1734032061177/Put/seqid=0 2024-12-12T19:34:21,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T19:34:21,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T19:34:21,498 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:21,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:21,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742251_1427 (size=12001) 2024-12-12T19:34:21,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032121539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032121544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032121551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032121552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032121566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T19:34:21,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:21,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T19:34:21,830 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T19:34:21,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:21,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:21,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:21,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032121852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032121852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032121866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032121867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032121887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:21,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/540b897376174d4e803f497e6f8b7958 2024-12-12T19:34:21,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/cb801cf7329f4663a2df75d31c49d3a6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cb801cf7329f4663a2df75d31c49d3a6 2024-12-12T19:34:21,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cb801cf7329f4663a2df75d31c49d3a6, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T19:34:21,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/f1c2a04fed9b41d2a2875db872a2f989 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989 2024-12-12T19:34:22,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T19:34:22,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/540b897376174d4e803f497e6f8b7958 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/540b897376174d4e803f497e6f8b7958 2024-12-12T19:34:22,015 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T19:34:22,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:22,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:22,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:22,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:22,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:22,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:22,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/540b897376174d4e803f497e6f8b7958, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T19:34:22,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for d2250407ef6b64ed659c4133a7c4d89c in 861ms, sequenceid=14, compaction requested=false 2024-12-12T19:34:22,041 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T19:34:22,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:22,177 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:22,178 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:22,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/f992632eca724b32bda8ee77d2e2eac3 is 50, key is test_row_0/A:col10/1734032061224/Put/seqid=0 2024-12-12T19:34:22,201 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742252_1428 (size=12001) 2024-12-12T19:34:22,204 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/f992632eca724b32bda8ee77d2e2eac3 2024-12-12T19:34:22,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/4f03567fd4a6466dbf5ea80cac36d50f is 50, key is test_row_0/B:col10/1734032061224/Put/seqid=0 2024-12-12T19:34:22,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742253_1429 (size=12001) 2024-12-12T19:34:22,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T19:34:22,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:22,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:22,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032122374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032122376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032122377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032122378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032122399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032122480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032122483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,642 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/4f03567fd4a6466dbf5ea80cac36d50f 2024-12-12T19:34:22,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/db054f357131468a8969d5f5223ca71a is 50, key is test_row_0/C:col10/1734032061224/Put/seqid=0 2024-12-12T19:34:22,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032122688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742254_1430 (size=12001) 2024-12-12T19:34:22,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032122691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032122999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032123001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,094 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/db054f357131468a8969d5f5223ca71a 2024-12-12T19:34:23,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/f992632eca724b32bda8ee77d2e2eac3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f992632eca724b32bda8ee77d2e2eac3 2024-12-12T19:34:23,154 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f992632eca724b32bda8ee77d2e2eac3, entries=150, sequenceid=39, filesize=11.7 K 2024-12-12T19:34:23,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/4f03567fd4a6466dbf5ea80cac36d50f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4f03567fd4a6466dbf5ea80cac36d50f 2024-12-12T19:34:23,164 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4f03567fd4a6466dbf5ea80cac36d50f, entries=150, sequenceid=39, filesize=11.7 K 2024-12-12T19:34:23,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/db054f357131468a8969d5f5223ca71a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/db054f357131468a8969d5f5223ca71a 2024-12-12T19:34:23,197 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/db054f357131468a8969d5f5223ca71a, entries=150, sequenceid=39, filesize=11.7 K 2024-12-12T19:34:23,199 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2250407ef6b64ed659c4133a7c4d89c in 1021ms, sequenceid=39, compaction requested=false 2024-12-12T19:34:23,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:23,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-12T19:34:23,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-12T19:34:23,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-12T19:34:23,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0410 sec 2024-12-12T19:34:23,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.0450 sec 2024-12-12T19:34:23,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T19:34:23,287 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-12T19:34:23,304 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:23,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-12T19:34:23,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T19:34:23,308 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:23,308 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:23,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:23,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:23,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:23,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:23,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:23,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:23,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:23,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:23,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:23,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/2663977b051a4c20ad280e9bda849502 is 50, key is test_row_0/A:col10/1734032063389/Put/seqid=0 2024-12-12T19:34:23,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T19:34:23,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742255_1431 (size=12001) 2024-12-12T19:34:23,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/2663977b051a4c20ad280e9bda849502 2024-12-12T19:34:23,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:23,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:23,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:23,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/bd089b1a4712483988a32d78aa027fec is 50, key is test_row_0/B:col10/1734032063389/Put/seqid=0 2024-12-12T19:34:23,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032123469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032123471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032123479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742256_1432 (size=12001) 2024-12-12T19:34:23,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032123508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032123510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032123581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032123587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032123588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T19:34:23,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:23,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:23,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,670 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T19:34:23,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:23,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:23,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032123795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032123799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:23,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032123799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/bd089b1a4712483988a32d78aa027fec 2024-12-12T19:34:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T19:34:23,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/83c3c252b75147c5b2e58252fefa1fa0 is 50, key is test_row_0/C:col10/1734032063389/Put/seqid=0 2024-12-12T19:34:23,946 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:23,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742257_1433 (size=12001) 2024-12-12T19:34:23,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:23,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
as already flushing 2024-12-12T19:34:23,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:23,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:23,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/83c3c252b75147c5b2e58252fefa1fa0 2024-12-12T19:34:23,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/2663977b051a4c20ad280e9bda849502 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/2663977b051a4c20ad280e9bda849502 2024-12-12T19:34:24,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/2663977b051a4c20ad280e9bda849502, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:34:24,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/bd089b1a4712483988a32d78aa027fec as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/bd089b1a4712483988a32d78aa027fec 2024-12-12T19:34:24,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/bd089b1a4712483988a32d78aa027fec, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:34:24,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/83c3c252b75147c5b2e58252fefa1fa0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/83c3c252b75147c5b2e58252fefa1fa0 2024-12-12T19:34:24,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032124100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,106 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:24,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
as already flushing 2024-12-12T19:34:24,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/83c3c252b75147c5b2e58252fefa1fa0, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T19:34:24,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d2250407ef6b64ed659c4133a7c4d89c in 719ms, sequenceid=52, compaction requested=true 2024-12-12T19:34:24,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:24,114 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:24,114 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:24,114 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:24,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:24,114 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:24,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:24,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:24,114 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:24,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:24,115 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:24,115 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4f03567fd4a6466dbf5ea80cac36d50f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/bd089b1a4712483988a32d78aa027fec] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=35.2 K 2024-12-12T19:34:24,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:24,115 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:24,115 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f1c2a04fed9b41d2a2875db872a2f989, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734032061169 2024-12-12T19:34:24,115 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:24,115 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:24,115 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cb801cf7329f4663a2df75d31c49d3a6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f992632eca724b32bda8ee77d2e2eac3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/2663977b051a4c20ad280e9bda849502] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=35.2 K 2024-12-12T19:34:24,122 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb801cf7329f4663a2df75d31c49d3a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734032061169 2024-12-12T19:34:24,122 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f03567fd4a6466dbf5ea80cac36d50f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734032061198 2024-12-12T19:34:24,123 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting bd089b1a4712483988a32d78aa027fec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032062371 2024-12-12T19:34:24,123 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting f992632eca724b32bda8ee77d2e2eac3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734032061198 2024-12-12T19:34:24,124 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2663977b051a4c20ad280e9bda849502, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032062371 2024-12-12T19:34:24,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/081e8f7076cd49ea969083d7663b99fb is 50, key is test_row_0/A:col10/1734032063467/Put/seqid=0 2024-12-12T19:34:24,149 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#355 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:24,149 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/19de58ffafe044d0a23b6b6b4193ccb3 is 50, key is test_row_0/A:col10/1734032063389/Put/seqid=0 2024-12-12T19:34:24,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032124155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032124160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,169 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#356 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:24,170 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/4e7c43e92bdf4d01881aa1dadf8b05df is 50, key is test_row_0/B:col10/1734032063389/Put/seqid=0 2024-12-12T19:34:24,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742258_1434 (size=14341) 2024-12-12T19:34:24,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/081e8f7076cd49ea969083d7663b99fb 2024-12-12T19:34:24,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/844fc1cbd23e48db881a3dfbe893cbc0 is 50, key is test_row_0/B:col10/1734032063467/Put/seqid=0 2024-12-12T19:34:24,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742259_1435 (size=12104) 2024-12-12T19:34:24,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742260_1436 (size=12104) 2024-12-12T19:34:24,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:24,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:24,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:24,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:24,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032124267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742261_1437 (size=12001) 2024-12-12T19:34:24,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032124268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,415 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:24,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:24,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:24,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T19:34:24,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032124476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032124480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032124523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032124524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:24,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:24,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032124617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,667 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/19de58ffafe044d0a23b6b6b4193ccb3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/19de58ffafe044d0a23b6b6b4193ccb3 2024-12-12T19:34:24,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/844fc1cbd23e48db881a3dfbe893cbc0 2024-12-12T19:34:24,704 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into 19de58ffafe044d0a23b6b6b4193ccb3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:24,704 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:24,704 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=13, startTime=1734032064113; duration=0sec 2024-12-12T19:34:24,704 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:24,704 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:24,704 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:24,704 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/4e7c43e92bdf4d01881aa1dadf8b05df as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4e7c43e92bdf4d01881aa1dadf8b05df 2024-12-12T19:34:24,713 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:24,714 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:24,714 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:24,714 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/540b897376174d4e803f497e6f8b7958, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/db054f357131468a8969d5f5223ca71a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/83c3c252b75147c5b2e58252fefa1fa0] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=35.2 K 2024-12-12T19:34:24,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/175ec001c1574aaa9f2954ab73f51b7d is 50, key is test_row_0/C:col10/1734032063467/Put/seqid=0 2024-12-12T19:34:24,718 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 540b897376174d4e803f497e6f8b7958, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734032061169 2024-12-12T19:34:24,719 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting db054f357131468a8969d5f5223ca71a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734032061198 2024-12-12T19:34:24,719 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83c3c252b75147c5b2e58252fefa1fa0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032062371 2024-12-12T19:34:24,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:24,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:24,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:24,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:24,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:24,751 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 4e7c43e92bdf4d01881aa1dadf8b05df(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:24,751 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:24,751 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=13, startTime=1734032064114; duration=0sec 2024-12-12T19:34:24,751 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:24,751 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:24,769 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#359 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:24,770 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/7e24e2cc0eb74e39872a954a0c91e445 is 50, key is test_row_0/C:col10/1734032063389/Put/seqid=0 2024-12-12T19:34:24,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742262_1438 (size=12001) 2024-12-12T19:34:24,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/175ec001c1574aaa9f2954ab73f51b7d 2024-12-12T19:34:24,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032124792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742263_1439 (size=12104) 2024-12-12T19:34:24,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/081e8f7076cd49ea969083d7663b99fb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/081e8f7076cd49ea969083d7663b99fb 2024-12-12T19:34:24,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:24,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032124799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,824 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/7e24e2cc0eb74e39872a954a0c91e445 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7e24e2cc0eb74e39872a954a0c91e445 2024-12-12T19:34:24,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/081e8f7076cd49ea969083d7663b99fb, entries=200, sequenceid=76, filesize=14.0 K 2024-12-12T19:34:24,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/844fc1cbd23e48db881a3dfbe893cbc0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/844fc1cbd23e48db881a3dfbe893cbc0 2024-12-12T19:34:24,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/844fc1cbd23e48db881a3dfbe893cbc0, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T19:34:24,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/175ec001c1574aaa9f2954ab73f51b7d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/175ec001c1574aaa9f2954ab73f51b7d 2024-12-12T19:34:24,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/175ec001c1574aaa9f2954ab73f51b7d, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T19:34:24,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 
KB/378720, currentSize=60.38 KB/61830 for d2250407ef6b64ed659c4133a7c4d89c in 738ms, sequenceid=76, compaction requested=false 2024-12-12T19:34:24,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:24,873 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into 7e24e2cc0eb74e39872a954a0c91e445(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:24,873 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:24,873 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=13, startTime=1734032064114; duration=0sec 2024-12-12T19:34:24,873 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:24,873 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:24,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:24,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:24,884 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:24,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:24,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/d4bd14172f9b4e70a98a036e8c725799 is 50, key is test_row_0/A:col10/1734032064142/Put/seqid=0 2024-12-12T19:34:24,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742264_1440 (size=12001) 2024-12-12T19:34:24,975 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/d4bd14172f9b4e70a98a036e8c725799 2024-12-12T19:34:25,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62 is 50, key is test_row_0/B:col10/1734032064142/Put/seqid=0 2024-12-12T19:34:25,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742265_1441 (size=12001) 2024-12-12T19:34:25,055 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62 2024-12-12T19:34:25,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/4cf14239d40244eea6d639e7ef76d8db is 50, key is test_row_0/C:col10/1734032064142/Put/seqid=0 2024-12-12T19:34:25,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742266_1442 (size=12001) 2024-12-12T19:34:25,127 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/4cf14239d40244eea6d639e7ef76d8db 2024-12-12T19:34:25,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/d4bd14172f9b4e70a98a036e8c725799 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d4bd14172f9b4e70a98a036e8c725799 2024-12-12T19:34:25,169 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d4bd14172f9b4e70a98a036e8c725799, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:34:25,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62 2024-12-12T19:34:25,205 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:34:25,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/4cf14239d40244eea6d639e7ef76d8db as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4cf14239d40244eea6d639e7ef76d8db 2024-12-12T19:34:25,230 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4cf14239d40244eea6d639e7ef76d8db, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T19:34:25,231 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for d2250407ef6b64ed659c4133a7c4d89c in 347ms, sequenceid=91, compaction requested=true 2024-12-12T19:34:25,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:25,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:25,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-12T19:34:25,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-12T19:34:25,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-12T19:34:25,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9240 sec 2024-12-12T19:34:25,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.9330 sec 2024-12-12T19:34:25,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:25,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:25,357 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/026ed9a79eea4eafa81d650d1c90c21f is 50, key is test_row_0/A:col10/1734032065323/Put/seqid=0 2024-12-12T19:34:25,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742267_1443 (size=16681) 2024-12-12T19:34:25,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T19:34:25,431 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-12T19:34:25,442 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:25,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-12T19:34:25,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:25,448 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:25,448 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:25,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:25,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032125447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032125455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:25,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032125563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032125575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,600 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:25,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
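The repeated RegionTooBusyException warnings are the region server rejecting writes while d2250407ef6b64ed659c4133a7c4d89c is over its 512 K memstore limit. A minimal client-side sketch of the kind of put that triggers them is shown below; it is illustrative only, the column value is a placeholder, and the explicit retry loop is an assumption for clarity, since the stock HBase client normally retries this exception internally and surfaces a RetriesExhaustedException only once its retries run out.

// Illustrative sketch: a put against the test table that may be rejected with
// RegionTooBusyException while flushes/compactions drain the memstore, retried with
// a simple linear backoff.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column names mirror the test's pattern (test_row_0, families A/B/C, col10);
      // the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                                   // write accepted by the region server
        } catch (RegionTooBusyException e) {
          Thread.sleep(200L * (attempt + 1));      // back off while the memstore is flushed/compacted
        }
      }
    }
  }
}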
2024-12-12T19:34:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032125629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:25,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
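The pid=122/123 flush being retried here is driven by an admin-side FLUSH request; the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed" line shows the client waiting on the table future for the previous request. A minimal illustrative sketch of that call follows, again assuming the cluster configuration is available on the classpath.

// Illustrative sketch: ask the master to flush all memstores of the test table.
// As the log shows, the region server may report "NOT flushing ... as already flushing"
// and the master-side FlushTableProcedure retries until it succeeds.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The client blocks on the returned table future until the flush procedure completes,
      // matching the "procId: ... completed" lines in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}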
2024-12-12T19:34:25,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032125776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:25,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032125791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/026ed9a79eea4eafa81d650d1c90c21f 2024-12-12T19:34:25,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/551e09800bdf4ff8921802a8ef5a0178 is 50, key is test_row_0/B:col10/1734032065323/Put/seqid=0 2024-12-12T19:34:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742268_1444 (size=12001) 2024-12-12T19:34:25,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:25,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:25,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:25,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:25,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:25,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:25,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:26,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032126085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,095 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:26,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032126097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:26,262 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:26,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:26,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:26,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/551e09800bdf4ff8921802a8ef5a0178 2024-12-12T19:34:26,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/6359a938739e46d9b2d56f97d5cc8b05 is 50, key is test_row_0/C:col10/1734032065323/Put/seqid=0 2024-12-12T19:34:26,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742269_1445 (size=12001) 2024-12-12T19:34:26,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/6359a938739e46d9b2d56f97d5cc8b05 2024-12-12T19:34:26,420 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:26,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:26,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:26,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/026ed9a79eea4eafa81d650d1c90c21f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/026ed9a79eea4eafa81d650d1c90c21f 2024-12-12T19:34:26,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/026ed9a79eea4eafa81d650d1c90c21f, entries=250, sequenceid=103, filesize=16.3 K 2024-12-12T19:34:26,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/551e09800bdf4ff8921802a8ef5a0178 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/551e09800bdf4ff8921802a8ef5a0178 2024-12-12T19:34:26,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/551e09800bdf4ff8921802a8ef5a0178, entries=150, sequenceid=103, filesize=11.7 K 2024-12-12T19:34:26,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/6359a938739e46d9b2d56f97d5cc8b05 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6359a938739e46d9b2d56f97d5cc8b05 2024-12-12T19:34:26,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6359a938739e46d9b2d56f97d5cc8b05, entries=150, sequenceid=103, filesize=11.7 K 2024-12-12T19:34:26,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2250407ef6b64ed659c4133a7c4d89c in 1169ms, sequenceid=103, compaction requested=true 2024-12-12T19:34:26,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:26,497 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:26,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:26,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:26,498 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:26,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:26,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:26,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:26,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:26,500 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:26,500 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55127 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:26,500 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:26,500 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:26,500 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,500 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:26,500 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4e7c43e92bdf4d01881aa1dadf8b05df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/844fc1cbd23e48db881a3dfbe893cbc0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/551e09800bdf4ff8921802a8ef5a0178] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.0 K 2024-12-12T19:34:26,500 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/19de58ffafe044d0a23b6b6b4193ccb3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/081e8f7076cd49ea969083d7663b99fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d4bd14172f9b4e70a98a036e8c725799, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/026ed9a79eea4eafa81d650d1c90c21f] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=53.8 K 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19de58ffafe044d0a23b6b6b4193ccb3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032062371 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e7c43e92bdf4d01881aa1dadf8b05df, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032062371 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 081e8f7076cd49ea969083d7663b99fb, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734032063467 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 844fc1cbd23e48db881a3dfbe893cbc0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734032063467 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4bd14172f9b4e70a98a036e8c725799, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734032064140 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 
1c4cfd8ec5f24ec6aa7e9cd0974dbc62, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734032064140 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 026ed9a79eea4eafa81d650d1c90c21f, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734032065311 2024-12-12T19:34:26,501 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 551e09800bdf4ff8921802a8ef5a0178, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734032065311 2024-12-12T19:34:26,517 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:26,518 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/b9dfb625ff284093baf37de4ca332601 is 50, key is test_row_0/A:col10/1734032065323/Put/seqid=0 2024-12-12T19:34:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:26,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:34:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:26,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:26,553 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#367 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:26,554 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/8835f2cc125548b3b43f285a84e8cbbb is 50, key is test_row_0/B:col10/1734032065323/Put/seqid=0 2024-12-12T19:34:26,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742270_1446 (size=12241) 2024-12-12T19:34:26,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/1ae883b5a26749338f6e4e3cfc155eeb is 50, key is test_row_0/A:col10/1734032066541/Put/seqid=0 2024-12-12T19:34:26,567 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/b9dfb625ff284093baf37de4ca332601 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b9dfb625ff284093baf37de4ca332601 2024-12-12T19:34:26,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:26,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:26,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,580 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into b9dfb625ff284093baf37de4ca332601(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:26,581 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:26,581 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=12, startTime=1734032066497; duration=0sec 2024-12-12T19:34:26,581 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:26,581 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:26,581 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:26,582 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:26,582 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:26,582 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:26,582 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7e24e2cc0eb74e39872a954a0c91e445, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/175ec001c1574aaa9f2954ab73f51b7d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4cf14239d40244eea6d639e7ef76d8db, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6359a938739e46d9b2d56f97d5cc8b05] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.0 K 2024-12-12T19:34:26,585 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e24e2cc0eb74e39872a954a0c91e445, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734032062371 2024-12-12T19:34:26,586 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 175ec001c1574aaa9f2954ab73f51b7d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734032063467 2024-12-12T19:34:26,587 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cf14239d40244eea6d639e7ef76d8db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734032064140 2024-12-12T19:34:26,588 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6359a938739e46d9b2d56f97d5cc8b05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734032065311 2024-12-12T19:34:26,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032126579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032126581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742272_1448 (size=14341) 2024-12-12T19:34:26,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032126590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/1ae883b5a26749338f6e4e3cfc155eeb 2024-12-12T19:34:26,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742271_1447 (size=12241) 2024-12-12T19:34:26,602 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:26,603 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/d35abc8bf239434b982f69c21183b901 is 50, key is test_row_0/C:col10/1734032065323/Put/seqid=0 2024-12-12T19:34:26,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032126601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/5d08df2b30104da485120b85ff79cb15 is 50, key is test_row_0/B:col10/1734032066541/Put/seqid=0 2024-12-12T19:34:26,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742273_1449 (size=12241) 2024-12-12T19:34:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742274_1450 (size=12001) 2024-12-12T19:34:26,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/5d08df2b30104da485120b85ff79cb15 2024-12-12T19:34:26,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032126697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032126702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,715 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/d35abc8bf239434b982f69c21183b901 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d35abc8bf239434b982f69c21183b901 2024-12-12T19:34:26,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/b2b613887ce443e591ae5fe666a80612 is 50, key is test_row_0/C:col10/1734032066541/Put/seqid=0 2024-12-12T19:34:26,738 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:26,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,738 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into d35abc8bf239434b982f69c21183b901(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:26,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
as already flushing 2024-12-12T19:34:26,738 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:26,738 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=12, startTime=1734032066498; duration=0sec 2024-12-12T19:34:26,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,738 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:26,738 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:26,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
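The pid=123 failures above ("Unable to complete flush ... as already flushing") come from the master-coordinated flush procedure racing with the MemStoreFlusher flush already running on d2250407ef6b64ed659c4133a7c4d89c: the server-side FlushRegionCallable bails out, the master logs "Remote procedure failed", and the dispatch is repeated until the in-progress flush finishes (which happens around 19:34:27,392 below). As a minimal sketch, assuming a reachable cluster picked up from the default configuration and using the table name taken from this log, a master-coordinated flush of this kind can be requested through the public Admin API:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask the master to flush every region of the table. If a region is already
          // flushing (as in the entries above), the remote flush work fails on the
          // region server and the master keeps retrying until the running flush completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }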
2024-12-12T19:34:26,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742275_1451 (size=12001) 2024-12-12T19:34:26,899 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:26,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:26,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:26,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:26,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:26,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032126914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:26,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:26,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032126920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,027 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/8835f2cc125548b3b43f285a84e8cbbb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/8835f2cc125548b3b43f285a84e8cbbb 2024-12-12T19:34:27,050 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 8835f2cc125548b3b43f285a84e8cbbb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:27,050 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:27,050 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=12, startTime=1734032066498; duration=0sec 2024-12-12T19:34:27,050 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:27,050 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:27,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:27,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
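The recurring RegionTooBusyException rejections ("Over memstore limit=512.0 K") in these entries mean HRegion.checkResources is refusing new mutations while the region's memstore sits above its blocking limit; writes are accepted again once the flush drains it. Below is a minimal client-side sketch that writes the same row/family/qualifier seen in this log and backs off on that exception. It is an illustration only: the stock HBase client already retries RegionTooBusyException internally, and depending on retry settings the exception may surface wrapped in a RetriesExhaustedException rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier are taken from this log; the value is arbitrary.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;  // write accepted
            } catch (RegionTooBusyException e) {
              // Region is over its memstore blocking limit (the condition logged above);
              // give the flush time to drain the memstore, then try again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }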
2024-12-12T19:34:27,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:27,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:27,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:27,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:27,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:27,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/b2b613887ce443e591ae5fe666a80612 2024-12-12T19:34:27,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/1ae883b5a26749338f6e4e3cfc155eeb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1ae883b5a26749338f6e4e3cfc155eeb 2024-12-12T19:34:27,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1ae883b5a26749338f6e4e3cfc155eeb, entries=200, sequenceid=128, filesize=14.0 K 2024-12-12T19:34:27,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/5d08df2b30104da485120b85ff79cb15 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/5d08df2b30104da485120b85ff79cb15 2024-12-12T19:34:27,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032127226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,232 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:27,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:27,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:27,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:27,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:27,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:27,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032127233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:27,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/5d08df2b30104da485120b85ff79cb15, entries=150, sequenceid=128, filesize=11.7 K 2024-12-12T19:34:27,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/b2b613887ce443e591ae5fe666a80612 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b2b613887ce443e591ae5fe666a80612 2024-12-12T19:34:27,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b2b613887ce443e591ae5fe666a80612, entries=150, sequenceid=128, filesize=11.7 K 2024-12-12T19:34:27,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d2250407ef6b64ed659c4133a7c4d89c in 763ms, sequenceid=128, compaction requested=false 2024-12-12T19:34:27,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:27,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-12T19:34:27,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
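Entries like the HRegion(3040) summary above ("Finished flush of dataSize ~147.60 KB/151140 ... in 763ms, sequenceid=128") record each completed flush with its exact byte count, duration and sequence id. The following is a small sketch for pulling those numbers out of a log in this exact format; the pattern is keyed to the wording shown here and is only an assumption for other HBase versions.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class FlushSummaryScan {
      // Captures the raw byte count, region, duration and sequence id from lines such as
      // "Finished flush of dataSize ~147.60 KB/151140, ... for d2250407... in 763ms, sequenceid=128, ...".
      private static final Pattern FLUSH_LINE = Pattern.compile(
          "Finished flush of dataSize ~[^/]+/(\\d+).*? for (\\w+) in (\\d+)ms, sequenceid=(\\d+)");

      public static void main(String[] args) throws IOException {
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(args[0]))) {
          String line;
          while ((line = reader.readLine()) != null) {
            Matcher m = FLUSH_LINE.matcher(line);
            while (m.find()) {  // a physical line may hold several log entries
              System.out.printf("region=%s seqid=%s flushed %s bytes in %s ms%n",
                  m.group(2), m.group(4), m.group(1), m.group(3));
            }
          }
        }
      }
    }

Run as "java FlushSummaryScan.java path/to/this.log" (single-file source launch, JDK 11+); for the flush summarized above it prints region=d2250407ef6b64ed659c4133a7c4d89c seqid=128 flushed 151140 bytes in 763 ms.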
2024-12-12T19:34:27,392 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:27,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:27,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:27,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:27,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:27,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:27,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:27,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/1e74f191329949e7a1602a6b14e2e6c9 is 50, key is test_row_0/A:col10/1734032066578/Put/seqid=0 2024-12-12T19:34:27,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742276_1452 (size=12151) 2024-12-12T19:34:27,459 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/1e74f191329949e7a1602a6b14e2e6c9 2024-12-12T19:34:27,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/27bcbd9d5291485190beb7078cae107d is 50, key is test_row_0/B:col10/1734032066578/Put/seqid=0 2024-12-12T19:34:27,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:27,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742277_1453 (size=12151) 2024-12-12T19:34:27,603 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=143 
(bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/27bcbd9d5291485190beb7078cae107d 2024-12-12T19:34:27,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:27,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:27,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/674d0d58d014477ea24e909391c7a4b0 is 50, key is test_row_0/C:col10/1734032066578/Put/seqid=0 2024-12-12T19:34:27,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032127708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032127716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032127716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742278_1454 (size=12151) 2024-12-12T19:34:27,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032127730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032127740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032127819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032127827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:27,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032127831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032128024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032128042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032128045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,134 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/674d0d58d014477ea24e909391c7a4b0 2024-12-12T19:34:28,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/1e74f191329949e7a1602a6b14e2e6c9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1e74f191329949e7a1602a6b14e2e6c9 2024-12-12T19:34:28,175 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1e74f191329949e7a1602a6b14e2e6c9, entries=150, sequenceid=143, filesize=11.9 K 2024-12-12T19:34:28,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/27bcbd9d5291485190beb7078cae107d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/27bcbd9d5291485190beb7078cae107d 2024-12-12T19:34:28,205 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/27bcbd9d5291485190beb7078cae107d, entries=150, sequenceid=143, filesize=11.9 K 2024-12-12T19:34:28,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/674d0d58d014477ea24e909391c7a4b0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/674d0d58d014477ea24e909391c7a4b0 2024-12-12T19:34:28,245 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/674d0d58d014477ea24e909391c7a4b0, entries=150, sequenceid=143, filesize=11.9 K 2024-12-12T19:34:28,246 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d2250407ef6b64ed659c4133a7c4d89c in 855ms, sequenceid=143, compaction requested=true 2024-12-12T19:34:28,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:28,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:28,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-12T19:34:28,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-12T19:34:28,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-12T19:34:28,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8030 sec 2024-12-12T19:34:28,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 2.8190 sec 2024-12-12T19:34:28,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:28,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:34:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:28,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/b077b0b574aa4ad494f83d5239ad71c9 is 50, key is test_row_0/A:col10/1734032067694/Put/seqid=0 2024-12-12T19:34:28,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742279_1455 (size=14541) 2024-12-12T19:34:28,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032128383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032128392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032128394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032128496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032128520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032128523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032128715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032128728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032128734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032128756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:28,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032128758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:28,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/b077b0b574aa4ad494f83d5239ad71c9 2024-12-12T19:34:28,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/89873d353f604e41954fd470e17ca0f2 is 50, key is test_row_0/B:col10/1734032067694/Put/seqid=0 2024-12-12T19:34:28,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742280_1456 (size=12151) 2024-12-12T19:34:28,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/89873d353f604e41954fd470e17ca0f2 2024-12-12T19:34:28,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/8e4532ed076a4adbb227696994951f50 is 50, key is test_row_0/C:col10/1734032067694/Put/seqid=0 2024-12-12T19:34:28,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742281_1457 (size=12151) 2024-12-12T19:34:29,030 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032129028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032129037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032129042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/8e4532ed076a4adbb227696994951f50 2024-12-12T19:34:29,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/b077b0b574aa4ad494f83d5239ad71c9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b077b0b574aa4ad494f83d5239ad71c9 2024-12-12T19:34:29,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b077b0b574aa4ad494f83d5239ad71c9, entries=200, sequenceid=169, filesize=14.2 K 2024-12-12T19:34:29,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/89873d353f604e41954fd470e17ca0f2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/89873d353f604e41954fd470e17ca0f2 2024-12-12T19:34:29,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/89873d353f604e41954fd470e17ca0f2, entries=150, sequenceid=169, filesize=11.9 K 2024-12-12T19:34:29,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/8e4532ed076a4adbb227696994951f50 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8e4532ed076a4adbb227696994951f50 2024-12-12T19:34:29,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8e4532ed076a4adbb227696994951f50, entries=150, sequenceid=169, filesize=11.9 K 2024-12-12T19:34:29,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2250407ef6b64ed659c4133a7c4d89c in 946ms, sequenceid=169, compaction requested=true 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:29,303 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:29,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:29,303 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:29,320 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:29,320 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:29,320 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:29,320 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/8835f2cc125548b3b43f285a84e8cbbb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/5d08df2b30104da485120b85ff79cb15, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/27bcbd9d5291485190beb7078cae107d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/89873d353f604e41954fd470e17ca0f2] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.4 K 2024-12-12T19:34:29,321 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53274 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:29,321 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:29,321 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:29,321 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b9dfb625ff284093baf37de4ca332601, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1ae883b5a26749338f6e4e3cfc155eeb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1e74f191329949e7a1602a6b14e2e6c9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b077b0b574aa4ad494f83d5239ad71c9] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=52.0 K 2024-12-12T19:34:29,322 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 8835f2cc125548b3b43f285a84e8cbbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734032065311 2024-12-12T19:34:29,323 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9dfb625ff284093baf37de4ca332601, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734032065311 2024-12-12T19:34:29,327 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d08df2b30104da485120b85ff79cb15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, 
earliestPutTs=1734032065430 2024-12-12T19:34:29,328 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 27bcbd9d5291485190beb7078cae107d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1734032066568 2024-12-12T19:34:29,328 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 89873d353f604e41954fd470e17ca0f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734032067686 2024-12-12T19:34:29,331 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ae883b5a26749338f6e4e3cfc155eeb, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734032065430 2024-12-12T19:34:29,336 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e74f191329949e7a1602a6b14e2e6c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1734032066568 2024-12-12T19:34:29,339 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b077b0b574aa4ad494f83d5239ad71c9, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734032067686 2024-12-12T19:34:29,393 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:29,393 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/7019781fe8bd455da02171b7c8b5e0d4 is 50, key is test_row_0/B:col10/1734032067694/Put/seqid=0 2024-12-12T19:34:29,429 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:29,430 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/af4657ac7bf742059c9d2660b52dd821 is 50, key is test_row_0/A:col10/1734032067694/Put/seqid=0 2024-12-12T19:34:29,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742282_1458 (size=12527) 2024-12-12T19:34:29,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742283_1459 (size=12527) 2024-12-12T19:34:29,470 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/7019781fe8bd455da02171b7c8b5e0d4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7019781fe8bd455da02171b7c8b5e0d4 2024-12-12T19:34:29,474 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/af4657ac7bf742059c9d2660b52dd821 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/af4657ac7bf742059c9d2660b52dd821 2024-12-12T19:34:29,481 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into af4657ac7bf742059c9d2660b52dd821(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:29,481 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:29,481 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=12, startTime=1734032069303; duration=0sec 2024-12-12T19:34:29,482 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:29,482 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:29,482 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:29,482 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 7019781fe8bd455da02171b7c8b5e0d4(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:29,482 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:29,482 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=12, startTime=1734032069303; duration=0sec 2024-12-12T19:34:29,482 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:29,483 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:29,483 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:29,483 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:29,483 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:29,483 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d35abc8bf239434b982f69c21183b901, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b2b613887ce443e591ae5fe666a80612, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/674d0d58d014477ea24e909391c7a4b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8e4532ed076a4adbb227696994951f50] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.4 K 2024-12-12T19:34:29,485 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d35abc8bf239434b982f69c21183b901, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734032065311 2024-12-12T19:34:29,485 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2b613887ce443e591ae5fe666a80612, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734032065430 2024-12-12T19:34:29,485 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 674d0d58d014477ea24e909391c7a4b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1734032066568 2024-12-12T19:34:29,486 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 
{}] compactions.Compactor(224): Compacting 8e4532ed076a4adbb227696994951f50, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734032067686 2024-12-12T19:34:29,496 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#380 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:29,496 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/efda3d6e6b8e4f1e99e4ee914187b1c1 is 50, key is test_row_0/C:col10/1734032067694/Put/seqid=0 2024-12-12T19:34:29,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742284_1460 (size=12527) 2024-12-12T19:34:29,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:29,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:29,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T19:34:29,554 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-12T19:34:29,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/6efcc3e5e3f945ac8b518b6bd0ad103c is 50, key is test_row_0/A:col10/1734032068382/Put/seqid=0 2024-12-12T19:34:29,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742285_1461 (size=12151) 2024-12-12T19:34:29,561 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:29,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=182 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/6efcc3e5e3f945ac8b518b6bd0ad103c 2024-12-12T19:34:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-12T19:34:29,562 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:29,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T19:34:29,562 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:29,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:29,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/dbe777b6b2d74593bd378599575d365a is 50, key is test_row_0/B:col10/1734032068382/Put/seqid=0 2024-12-12T19:34:29,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742286_1462 (size=12151) 2024-12-12T19:34:29,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032129604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032129612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032129615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T19:34:29,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032129716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:29,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:29,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:29,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:29,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:29,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:29,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:29,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032129721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032129724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T19:34:29,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:29,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:29,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:29,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:29,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:29,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:29,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:29,919 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/efda3d6e6b8e4f1e99e4ee914187b1c1 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/efda3d6e6b8e4f1e99e4ee914187b1c1 2024-12-12T19:34:29,933 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into efda3d6e6b8e4f1e99e4ee914187b1c1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:29,933 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:29,933 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=12, startTime=1734032069303; duration=0sec 2024-12-12T19:34:29,933 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:29,933 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:29,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032129926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032129932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:29,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032129938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:29,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/dbe777b6b2d74593bd378599575d365a 2024-12-12T19:34:30,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/9e47dbe2257f41adbe7c9465e42557da is 50, key is test_row_0/C:col10/1734032068382/Put/seqid=0 2024-12-12T19:34:30,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:30,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742287_1463 (size=12151) 2024-12-12T19:34:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T19:34:30,201 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:30,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:30,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:30,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032130237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032130237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032130254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:30,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:30,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:30,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:30,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/9e47dbe2257f41adbe7c9465e42557da 2024-12-12T19:34:30,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/6efcc3e5e3f945ac8b518b6bd0ad103c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6efcc3e5e3f945ac8b518b6bd0ad103c 2024-12-12T19:34:30,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6efcc3e5e3f945ac8b518b6bd0ad103c, entries=150, sequenceid=182, filesize=11.9 K 2024-12-12T19:34:30,535 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/dbe777b6b2d74593bd378599575d365a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dbe777b6b2d74593bd378599575d365a 2024-12-12T19:34:30,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:30,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:30,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:30,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:30,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:30,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dbe777b6b2d74593bd378599575d365a, entries=150, sequenceid=182, filesize=11.9 K 2024-12-12T19:34:30,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/9e47dbe2257f41adbe7c9465e42557da as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/9e47dbe2257f41adbe7c9465e42557da 2024-12-12T19:34:30,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/9e47dbe2257f41adbe7c9465e42557da, entries=150, sequenceid=182, filesize=11.9 K 2024-12-12T19:34:30,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for d2250407ef6b64ed659c4133a7c4d89c in 1034ms, sequenceid=182, compaction requested=false 2024-12-12T19:34:30,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:30,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T19:34:30,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,709 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:30,709 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:30,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:30,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/e0dd1a1e7d75411e81a9b2ac36506aef is 50, key is test_row_0/A:col10/1734032069602/Put/seqid=0 2024-12-12T19:34:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:30,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:30,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742288_1464 (size=12151) 2024-12-12T19:34:30,763 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/e0dd1a1e7d75411e81a9b2ac36506aef 2024-12-12T19:34:30,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032130770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032130773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032130773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032130782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032130783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/02a5829e5d584a099361512c1ddbef68 is 50, key is test_row_0/B:col10/1734032069602/Put/seqid=0 2024-12-12T19:34:30,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742289_1465 (size=12151) 2024-12-12T19:34:30,858 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/02a5829e5d584a099361512c1ddbef68 2024-12-12T19:34:30,875 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T19:34:30,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/a150f499d5664e389ad5a66fca36d853 is 50, key is test_row_0/C:col10/1734032069602/Put/seqid=0 2024-12-12T19:34:30,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032130884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032130884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032130886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032130896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:30,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032130896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742290_1466 (size=12151) 2024-12-12T19:34:30,911 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/a150f499d5664e389ad5a66fca36d853 2024-12-12T19:34:30,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/e0dd1a1e7d75411e81a9b2ac36506aef as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e0dd1a1e7d75411e81a9b2ac36506aef 2024-12-12T19:34:30,947 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e0dd1a1e7d75411e81a9b2ac36506aef, entries=150, sequenceid=209, filesize=11.9 K 2024-12-12T19:34:30,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/02a5829e5d584a099361512c1ddbef68 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/02a5829e5d584a099361512c1ddbef68 2024-12-12T19:34:30,972 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/02a5829e5d584a099361512c1ddbef68, entries=150, sequenceid=209, filesize=11.9 K 2024-12-12T19:34:30,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/a150f499d5664e389ad5a66fca36d853 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/a150f499d5664e389ad5a66fca36d853 2024-12-12T19:34:30,989 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/a150f499d5664e389ad5a66fca36d853, entries=150, sequenceid=209, filesize=11.9 K 2024-12-12T19:34:30,991 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2250407ef6b64ed659c4133a7c4d89c in 282ms, sequenceid=209, compaction requested=true 2024-12-12T19:34:30,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:30,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:30,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-12T19:34:30,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-12T19:34:31,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-12T19:34:31,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4320 sec 2024-12-12T19:34:31,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.4430 sec 2024-12-12T19:34:31,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:31,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:31,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:31,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:31,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:31,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:31,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:31,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:31,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a085521d8c0c4f48a12e6f5c4b6d8cbe is 50, key is test_row_0/A:col10/1734032071102/Put/seqid=0 2024-12-12T19:34:31,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742291_1467 (size=12151) 2024-12-12T19:34:31,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a085521d8c0c4f48a12e6f5c4b6d8cbe 2024-12-12T19:34:31,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/e72980a6bf694cd2bbecd3bb02e4d34a is 50, key is test_row_0/B:col10/1734032071102/Put/seqid=0 2024-12-12T19:34:31,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032131240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032131242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032131246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032131255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032131255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742292_1468 (size=12151) 2024-12-12T19:34:31,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/e72980a6bf694cd2bbecd3bb02e4d34a 2024-12-12T19:34:31,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/7d38814215974d39b3c5ad56b8028c0c is 50, key is test_row_0/C:col10/1734032071102/Put/seqid=0 2024-12-12T19:34:31,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032131363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032131375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032131376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032131379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032131392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742293_1469 (size=12151) 2024-12-12T19:34:31,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032131576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032131584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032131592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032131592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032131604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T19:34:31,699 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-12T19:34:31,720 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-12T19:34:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T19:34:31,726 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:31,726 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:31,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:31,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at 
sequenceid=221 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/7d38814215974d39b3c5ad56b8028c0c 2024-12-12T19:34:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T19:34:31,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a085521d8c0c4f48a12e6f5c4b6d8cbe as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a085521d8c0c4f48a12e6f5c4b6d8cbe 2024-12-12T19:34:31,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a085521d8c0c4f48a12e6f5c4b6d8cbe, entries=150, sequenceid=221, filesize=11.9 K 2024-12-12T19:34:31,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/e72980a6bf694cd2bbecd3bb02e4d34a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e72980a6bf694cd2bbecd3bb02e4d34a 2024-12-12T19:34:31,883 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-12T19:34:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:31,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:31,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:31,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:31,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e72980a6bf694cd2bbecd3bb02e4d34a, entries=150, sequenceid=221, filesize=11.9 K 2024-12-12T19:34:31,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032131889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/7d38814215974d39b3c5ad56b8028c0c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7d38814215974d39b3c5ad56b8028c0c 2024-12-12T19:34:31,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032131890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032131900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032131903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:31,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032131911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:31,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7d38814215974d39b3c5ad56b8028c0c, entries=150, sequenceid=221, filesize=11.9 K 2024-12-12T19:34:31,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2250407ef6b64ed659c4133a7c4d89c in 839ms, sequenceid=221, compaction requested=true 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:31,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:34:31,947 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:31,951 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:31,970 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:31,970 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:31,970 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:31,970 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/af4657ac7bf742059c9d2660b52dd821, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6efcc3e5e3f945ac8b518b6bd0ad103c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e0dd1a1e7d75411e81a9b2ac36506aef, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a085521d8c0c4f48a12e6f5c4b6d8cbe] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.8 K 2024-12-12T19:34:31,975 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:31,975 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:31,975 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:31,976 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/efda3d6e6b8e4f1e99e4ee914187b1c1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/9e47dbe2257f41adbe7c9465e42557da, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/a150f499d5664e389ad5a66fca36d853, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7d38814215974d39b3c5ad56b8028c0c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.8 K 2024-12-12T19:34:31,976 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting af4657ac7bf742059c9d2660b52dd821, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734032067686 2024-12-12T19:34:31,979 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting efda3d6e6b8e4f1e99e4ee914187b1c1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734032067686 2024-12-12T19:34:31,980 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6efcc3e5e3f945ac8b518b6bd0ad103c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734032068369 2024-12-12T19:34:31,980 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e47dbe2257f41adbe7c9465e42557da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734032068369 2024-12-12T19:34:31,983 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0dd1a1e7d75411e81a9b2ac36506aef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734032069601 2024-12-12T19:34:31,987 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a150f499d5664e389ad5a66fca36d853, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734032069601 2024-12-12T19:34:31,988 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a085521d8c0c4f48a12e6f5c4b6d8cbe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734032070772 2024-12-12T19:34:31,995 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d38814215974d39b3c5ad56b8028c0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734032070772 2024-12-12T19:34:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T19:34:32,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,041 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-12T19:34:32,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:32,043 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T19:34:32,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:32,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:32,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:32,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:32,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:32,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:32,044 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#390 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:32,045 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/53f53c0fc6504f7694cb82a236bf507e is 50, key is test_row_0/C:col10/1734032071102/Put/seqid=0 2024-12-12T19:34:32,076 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#391 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:32,077 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/fb934219e8c9490e9449d2dbff70f42a is 50, key is test_row_0/A:col10/1734032071102/Put/seqid=0 2024-12-12T19:34:32,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742294_1470 (size=12663) 2024-12-12T19:34:32,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/f03dd185d0af43518304312bee352ccc is 50, key is test_row_0/A:col10/1734032071184/Put/seqid=0 2024-12-12T19:34:32,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742295_1471 (size=12663) 2024-12-12T19:34:32,160 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/fb934219e8c9490e9449d2dbff70f42a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/fb934219e8c9490e9449d2dbff70f42a 2024-12-12T19:34:32,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742296_1472 (size=12151) 2024-12-12T19:34:32,167 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/53f53c0fc6504f7694cb82a236bf507e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/53f53c0fc6504f7694cb82a236bf507e 2024-12-12T19:34:32,185 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into fb934219e8c9490e9449d2dbff70f42a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:32,185 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:32,185 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=12, startTime=1734032071944; duration=0sec 2024-12-12T19:34:32,185 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:32,185 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:32,185 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:32,194 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:32,194 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:32,194 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:32,194 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7019781fe8bd455da02171b7c8b5e0d4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dbe777b6b2d74593bd378599575d365a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/02a5829e5d584a099361512c1ddbef68, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e72980a6bf694cd2bbecd3bb02e4d34a] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=47.8 K 2024-12-12T19:34:32,194 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7019781fe8bd455da02171b7c8b5e0d4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734032067686 2024-12-12T19:34:32,195 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbe777b6b2d74593bd378599575d365a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1734032068369 2024-12-12T19:34:32,195 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02a5829e5d584a099361512c1ddbef68, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734032069601 2024-12-12T19:34:32,195 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e72980a6bf694cd2bbecd3bb02e4d34a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734032070772 2024-12-12T19:34:32,199 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into 53f53c0fc6504f7694cb82a236bf507e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:32,199 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:32,199 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=12, startTime=1734032071944; duration=0sec 2024-12-12T19:34:32,199 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:32,199 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:32,220 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:32,220 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/9803cd9ffb95401aa9b12a3be9d472a2 is 50, key is test_row_0/B:col10/1734032071102/Put/seqid=0 2024-12-12T19:34:32,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742297_1473 (size=12663) 2024-12-12T19:34:32,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T19:34:32,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:32,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:32,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032132431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032132431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032132439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032132439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032132447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032132552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032132556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032132559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,567 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/f03dd185d0af43518304312bee352ccc 2024-12-12T19:34:32,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/d738a50d84924c00bda478b7eb19ef28 is 50, key is test_row_0/B:col10/1734032071184/Put/seqid=0 2024-12-12T19:34:32,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742298_1474 (size=12151) 2024-12-12T19:34:32,615 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/d738a50d84924c00bda478b7eb19ef28 2024-12-12T19:34:32,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0090ecc4fc8c44a184b860abd3ef6d10 is 50, key is 
test_row_0/C:col10/1734032071184/Put/seqid=0 2024-12-12T19:34:32,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742299_1475 (size=12151) 2024-12-12T19:34:32,700 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0090ecc4fc8c44a184b860abd3ef6d10 2024-12-12T19:34:32,719 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/9803cd9ffb95401aa9b12a3be9d472a2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/9803cd9ffb95401aa9b12a3be9d472a2 2024-12-12T19:34:32,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/f03dd185d0af43518304312bee352ccc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f03dd185d0af43518304312bee352ccc 2024-12-12T19:34:32,740 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 9803cd9ffb95401aa9b12a3be9d472a2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:32,740 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:32,740 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=12, startTime=1734032071944; duration=0sec 2024-12-12T19:34:32,740 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:32,740 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:32,746 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f03dd185d0af43518304312bee352ccc, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T19:34:32,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/d738a50d84924c00bda478b7eb19ef28 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/d738a50d84924c00bda478b7eb19ef28 2024-12-12T19:34:32,759 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/d738a50d84924c00bda478b7eb19ef28, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T19:34:32,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032132760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032132764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0090ecc4fc8c44a184b860abd3ef6d10 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0090ecc4fc8c44a184b860abd3ef6d10 2024-12-12T19:34:32,772 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0090ecc4fc8c44a184b860abd3ef6d10, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T19:34:32,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:32,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032132769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:32,784 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d2250407ef6b64ed659c4133a7c4d89c in 741ms, sequenceid=245, compaction requested=false 2024-12-12T19:34:32,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:32,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:32,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-12T19:34:32,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-12T19:34:32,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-12T19:34:32,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0620 sec 2024-12-12T19:34:32,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.0800 sec 2024-12-12T19:34:32,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T19:34:32,835 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-12T19:34:32,844 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:32,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-12T19:34:32,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T19:34:32,849 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:32,857 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:32,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:32,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T19:34:33,010 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-12T19:34:33,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:33,011 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:33,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:33,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:33,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:33,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:33,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:33,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:33,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/282590085b33441488219dd41bf7ae3e is 50, key is 
test_row_0/A:col10/1734032072432/Put/seqid=0 2024-12-12T19:34:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:33,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:33,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742300_1476 (size=12251) 2024-12-12T19:34:33,102 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/282590085b33441488219dd41bf7ae3e 2024-12-12T19:34:33,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/db3accb370724dfa97d5b624059eabde is 50, key is test_row_0/B:col10/1734032072432/Put/seqid=0 2024-12-12T19:34:33,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T19:34:33,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742301_1477 (size=12251) 2024-12-12T19:34:33,165 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/db3accb370724dfa97d5b624059eabde 2024-12-12T19:34:33,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/709c765769ed4d949b7dc2b77c3cfde4 is 50, key is test_row_0/C:col10/1734032072432/Put/seqid=0 2024-12-12T19:34:33,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032133237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032133240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032133241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742302_1478 (size=12251) 2024-12-12T19:34:33,280 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/709c765769ed4d949b7dc2b77c3cfde4 2024-12-12T19:34:33,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/282590085b33441488219dd41bf7ae3e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/282590085b33441488219dd41bf7ae3e 2024-12-12T19:34:33,345 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/282590085b33441488219dd41bf7ae3e, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T19:34:33,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/db3accb370724dfa97d5b624059eabde as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/db3accb370724dfa97d5b624059eabde 2024-12-12T19:34:33,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032133351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032133352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032133356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,406 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/db3accb370724dfa97d5b624059eabde, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T19:34:33,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/709c765769ed4d949b7dc2b77c3cfde4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/709c765769ed4d949b7dc2b77c3cfde4 2024-12-12T19:34:33,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T19:34:33,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032133450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,471 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/709c765769ed4d949b7dc2b77c3cfde4, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T19:34:33,477 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2250407ef6b64ed659c4133a7c4d89c in 466ms, sequenceid=260, compaction requested=true 2024-12-12T19:34:33,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:33,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:33,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-12T19:34:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-12T19:34:33,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:34:33,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:33,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:33,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:33,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:33,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:33,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:33,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-12T19:34:33,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 626 msec 2024-12-12T19:34:33,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/0462f69833d04b3c997e347790afd3e4 is 50, key is test_row_0/A:col10/1734032073476/Put/seqid=0 2024-12-12T19:34:33,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 650 msec 2024-12-12T19:34:33,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742303_1479 (size=14741) 2024-12-12T19:34:33,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/0462f69833d04b3c997e347790afd3e4 2024-12-12T19:34:33,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032133567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/653a83a7a92a48e0b829dd39d4b52e0a is 50, key is test_row_0/B:col10/1734032073476/Put/seqid=0 2024-12-12T19:34:33,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032133571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032133571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032133583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742304_1480 (size=12301) 2024-12-12T19:34:33,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/653a83a7a92a48e0b829dd39d4b52e0a 2024-12-12T19:34:33,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/124f329c2ac64284923c820ce89378de is 50, key is test_row_0/C:col10/1734032073476/Put/seqid=0 2024-12-12T19:34:33,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032133682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032133691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742305_1481 (size=12301) 2024-12-12T19:34:33,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032133882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032133884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032133891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:33,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032133903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:33,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T19:34:33,960 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-12T19:34:33,962 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-12T19:34:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T19:34:33,964 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:33,965 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:33,965 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:34,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T19:34:34,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/124f329c2ac64284923c820ce89378de 2024-12-12T19:34:34,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T19:34:34,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:34,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:34,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:34,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:34,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:34,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:34,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/0462f69833d04b3c997e347790afd3e4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0462f69833d04b3c997e347790afd3e4 2024-12-12T19:34:34,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0462f69833d04b3c997e347790afd3e4, entries=200, sequenceid=285, filesize=14.4 K 2024-12-12T19:34:34,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/653a83a7a92a48e0b829dd39d4b52e0a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/653a83a7a92a48e0b829dd39d4b52e0a 2024-12-12T19:34:34,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032134199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032134221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/653a83a7a92a48e0b829dd39d4b52e0a, entries=150, sequenceid=285, filesize=12.0 K 2024-12-12T19:34:34,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/124f329c2ac64284923c820ce89378de as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/124f329c2ac64284923c820ce89378de 2024-12-12T19:34:34,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T19:34:34,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/124f329c2ac64284923c820ce89378de, entries=150, sequenceid=285, filesize=12.0 K 2024-12-12T19:34:34,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d2250407ef6b64ed659c4133a7c4d89c in 804ms, sequenceid=285, compaction requested=true 2024-12-12T19:34:34,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:34,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:34,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:34,283 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:34,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:34,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:34,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:34,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:34,284 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:34,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T19:34:34,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:34,288 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:34,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:34,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:34,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:34,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:34,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:34,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:34,294 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49366 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:34,294 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51806 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:34,294 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:34,294 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:34,294 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:34,294 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/fb934219e8c9490e9449d2dbff70f42a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f03dd185d0af43518304312bee352ccc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/282590085b33441488219dd41bf7ae3e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0462f69833d04b3c997e347790afd3e4] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=50.6 K 2024-12-12T19:34:34,294 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:34,294 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/9803cd9ffb95401aa9b12a3be9d472a2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/d738a50d84924c00bda478b7eb19ef28, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/db3accb370724dfa97d5b624059eabde, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/653a83a7a92a48e0b829dd39d4b52e0a] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=48.2 K 2024-12-12T19:34:34,299 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb934219e8c9490e9449d2dbff70f42a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734032070772 2024-12-12T19:34:34,299 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9803cd9ffb95401aa9b12a3be9d472a2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734032070772 2024-12-12T19:34:34,301 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting f03dd185d0af43518304312bee352ccc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734032071184 2024-12-12T19:34:34,301 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d738a50d84924c00bda478b7eb19ef28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734032071184 2024-12-12T19:34:34,303 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting db3accb370724dfa97d5b624059eabde, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734032072423 2024-12-12T19:34:34,304 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 282590085b33441488219dd41bf7ae3e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734032072423 2024-12-12T19:34:34,307 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0462f69833d04b3c997e347790afd3e4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734032073156 2024-12-12T19:34:34,307 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 653a83a7a92a48e0b829dd39d4b52e0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734032073156 2024-12-12T19:34:34,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/83c86dd195f647529613a4d5257d72a8 is 50, key is test_row_0/A:col10/1734032073550/Put/seqid=0 2024-12-12T19:34:34,340 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:34,341 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/084d0326b1af47c38bf6b8763de1eb3c is 50, key is test_row_0/B:col10/1734032073476/Put/seqid=0 2024-12-12T19:34:34,353 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#404 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:34,353 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a754369f00d9442f8d54fcd3cd3f8048 is 50, key is test_row_0/A:col10/1734032073476/Put/seqid=0 2024-12-12T19:34:34,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742306_1482 (size=12301) 2024-12-12T19:34:34,378 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/83c86dd195f647529613a4d5257d72a8 2024-12-12T19:34:34,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
as already flushing 2024-12-12T19:34:34,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:34,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742308_1484 (size=12949) 2024-12-12T19:34:34,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742307_1483 (size=12949) 2024-12-12T19:34:34,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/318abf853ea44d83a9c54ac7bd66f845 is 50, key is test_row_0/B:col10/1734032073550/Put/seqid=0 2024-12-12T19:34:34,460 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a754369f00d9442f8d54fcd3cd3f8048 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a754369f00d9442f8d54fcd3cd3f8048 2024-12-12T19:34:34,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742309_1485 (size=12301) 2024-12-12T19:34:34,475 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/084d0326b1af47c38bf6b8763de1eb3c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/084d0326b1af47c38bf6b8763de1eb3c 2024-12-12T19:34:34,487 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into a754369f00d9442f8d54fcd3cd3f8048(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:34,487 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:34,487 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=12, startTime=1734032074283; duration=0sec 2024-12-12T19:34:34,487 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:34,487 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:34,488 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:34:34,510 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49366 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:34:34,510 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:34,510 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:34,510 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/53f53c0fc6504f7694cb82a236bf507e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0090ecc4fc8c44a184b860abd3ef6d10, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/709c765769ed4d949b7dc2b77c3cfde4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/124f329c2ac64284923c820ce89378de] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=48.2 K 2024-12-12T19:34:34,515 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53f53c0fc6504f7694cb82a236bf507e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734032070772 2024-12-12T19:34:34,520 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 084d0326b1af47c38bf6b8763de1eb3c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
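The "permutations with N in ratio" wording above refers to the exploring selection testing candidate file sets against a size-ratio condition: a set qualifies when no single file is larger than the configured compaction ratio times the combined size of the other files in the set. The sketch below illustrates only that check, with hypothetical names and sizes loosely based on the 12.4 K / 11.9 K / 12.0 K / 12.0 K files listed above; it is not the actual ExploringCompactionPolicy code.

    public final class SizeRatioCheck {
        // True when every file is no larger than `ratio` times the sum of the other files.
        // The default compaction ratio in HBase is 1.2 ("hbase.hstore.compaction.ratio").
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            long[] sizes = {12698, 12186, 12288, 12288};     // approximate byte sizes of the four candidates
            System.out.println(filesInRatio(sizes, 1.2));    // prints true: the set is "in ratio"
        }
    }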
2024-12-12T19:34:34,520 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:34,520 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=12, startTime=1734032074283; duration=0sec 2024-12-12T19:34:34,520 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:34,520 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:34,523 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0090ecc4fc8c44a184b860abd3ef6d10, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734032071184 2024-12-12T19:34:34,530 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 709c765769ed4d949b7dc2b77c3cfde4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734032072423 2024-12-12T19:34:34,539 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 124f329c2ac64284923c820ce89378de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734032073156 2024-12-12T19:34:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T19:34:34,576 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#406 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:34,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032134566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032134570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,576 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0d794d035bc64570a6d4803d8594f797 is 50, key is test_row_0/C:col10/1734032073476/Put/seqid=0 2024-12-12T19:34:34,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742310_1486 (size=12949) 2024-12-12T19:34:34,659 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0d794d035bc64570a6d4803d8594f797 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0d794d035bc64570a6d4803d8594f797 2024-12-12T19:34:34,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032134680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,681 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into 0d794d035bc64570a6d4803d8594f797(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:34,682 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:34,682 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=12, startTime=1734032074284; duration=0sec 2024-12-12T19:34:34,682 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:34,682 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:34,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032134682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032134705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032134732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,883 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/318abf853ea44d83a9c54ac7bd66f845 2024-12-12T19:34:34,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032134889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:34,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032134891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:34,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/5ff7eae19850404c9959e02f11c84fc0 is 50, key is test_row_0/C:col10/1734032073550/Put/seqid=0 2024-12-12T19:34:34,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742311_1487 (size=12301) 2024-12-12T19:34:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T19:34:35,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032135200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032135207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,379 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/5ff7eae19850404c9959e02f11c84fc0 2024-12-12T19:34:35,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/83c86dd195f647529613a4d5257d72a8 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/83c86dd195f647529613a4d5257d72a8 2024-12-12T19:34:35,428 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/83c86dd195f647529613a4d5257d72a8, entries=150, sequenceid=297, filesize=12.0 K 2024-12-12T19:34:35,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/318abf853ea44d83a9c54ac7bd66f845 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/318abf853ea44d83a9c54ac7bd66f845 2024-12-12T19:34:35,438 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/318abf853ea44d83a9c54ac7bd66f845, entries=150, sequenceid=297, filesize=12.0 K 2024-12-12T19:34:35,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/5ff7eae19850404c9959e02f11c84fc0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/5ff7eae19850404c9959e02f11c84fc0 2024-12-12T19:34:35,447 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/5ff7eae19850404c9959e02f11c84fc0, entries=150, sequenceid=297, filesize=12.0 K 2024-12-12T19:34:35,448 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2250407ef6b64ed659c4133a7c4d89c in 1160ms, sequenceid=297, compaction requested=false 2024-12-12T19:34:35,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:35,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:35,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-12T19:34:35,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-12T19:34:35,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-12T19:34:35,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4860 sec 2024-12-12T19:34:35,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.5100 sec 2024-12-12T19:34:35,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:34:35,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:35,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:35,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:35,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:35,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:35,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-12T19:34:35,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:35,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/01bc0509498040b2bffd3dc70331adf6 is 50, key is test_row_0/A:col10/1734032074569/Put/seqid=0 2024-12-12T19:34:35,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742312_1488 (size=12301) 2024-12-12T19:34:35,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/01bc0509498040b2bffd3dc70331adf6 2024-12-12T19:34:35,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/fe239fd00c5a473f815d5f8041480e10 is 50, key is test_row_0/B:col10/1734032074569/Put/seqid=0 2024-12-12T19:34:35,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032135560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742313_1489 (size=12301) 2024-12-12T19:34:35,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/fe239fd00c5a473f815d5f8041480e10 2024-12-12T19:34:35,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/63b27405146d47d89656f41f5e3f9a83 is 50, key is test_row_0/C:col10/1734032074569/Put/seqid=0 2024-12-12T19:34:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742314_1490 (size=12301) 2024-12-12T19:34:35,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/63b27405146d47d89656f41f5e3f9a83 2024-12-12T19:34:35,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032135675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/01bc0509498040b2bffd3dc70331adf6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/01bc0509498040b2bffd3dc70331adf6 2024-12-12T19:34:35,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032135715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032135719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032135719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/01bc0509498040b2bffd3dc70331adf6, entries=150, sequenceid=325, filesize=12.0 K 2024-12-12T19:34:35,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/fe239fd00c5a473f815d5f8041480e10 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fe239fd00c5a473f815d5f8041480e10 2024-12-12T19:34:35,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032135741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:35,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fe239fd00c5a473f815d5f8041480e10, entries=150, sequenceid=325, filesize=12.0 K 2024-12-12T19:34:35,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/63b27405146d47d89656f41f5e3f9a83 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/63b27405146d47d89656f41f5e3f9a83 2024-12-12T19:34:35,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/63b27405146d47d89656f41f5e3f9a83, entries=150, sequenceid=325, filesize=12.0 K 2024-12-12T19:34:35,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2250407ef6b64ed659c4133a7c4d89c in 343ms, sequenceid=325, compaction requested=true 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:35,840 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:35,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:35,843 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:35,852 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:35,852 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:35,853 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:35,853 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a754369f00d9442f8d54fcd3cd3f8048, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/83c86dd195f647529613a4d5257d72a8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/01bc0509498040b2bffd3dc70331adf6] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.7 K 2024-12-12T19:34:35,857 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:35,857 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:35,857 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
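The repeated RegionTooBusyException warnings in this section are the region server rejecting writes while the region's memstore is over its blocking limit (512.0 K in this run) and a flush is still draining it. The HBase client normally absorbs this through its own retry logic, and in practice the exception may surface wrapped in the client's retry exceptions; the sketch below only illustrates an explicit backoff loop around Table.put, with arbitrary retry counts and with the row and column names taken from the test keys in the log.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutWithBackoff {
        // Retries a put when the region reports it is over its memstore limit.
        static void putWithBackoff(Connection connection, Put put) throws Exception {
            try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                long backoffMs = 100;                        // arbitrary starting backoff
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        return;
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs);             // give the flush/compaction time to drain the memstore
                        backoffMs *= 2;
                    }
                }
                throw new IllegalStateException("region stayed too busy after retries");
            }
        }

        public static void main(String[] args) {
            // Row, family and qualifier taken from the keys in the log (test_row_0, families A/B/C, col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // A Connection would be obtained via ConnectionFactory.createConnection(HBaseConfiguration.create())
            // before calling putWithBackoff(connection, put).
        }
    }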
2024-12-12T19:34:35,857 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/084d0326b1af47c38bf6b8763de1eb3c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/318abf853ea44d83a9c54ac7bd66f845, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fe239fd00c5a473f815d5f8041480e10] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.7 K 2024-12-12T19:34:35,858 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a754369f00d9442f8d54fcd3cd3f8048, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734032073156 2024-12-12T19:34:35,858 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 084d0326b1af47c38bf6b8763de1eb3c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734032073156 2024-12-12T19:34:35,858 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 83c86dd195f647529613a4d5257d72a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1734032073550 2024-12-12T19:34:35,860 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 318abf853ea44d83a9c54ac7bd66f845, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1734032073550 2024-12-12T19:34:35,860 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 01bc0509498040b2bffd3dc70331adf6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1734032074564 2024-12-12T19:34:35,861 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe239fd00c5a473f815d5f8041480e10, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1734032074564 2024-12-12T19:34:35,878 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:35,878 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/19eb79fbd438420697202b0b6e8e06ab is 50, key is test_row_0/B:col10/1734032074569/Put/seqid=0 2024-12-12T19:34:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:35,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:35,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:35,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:35,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:35,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:35,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:35,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:35,905 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:35,906 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/e19d4115a50c4529b9c1c0717d1035d3 is 50, key is test_row_0/A:col10/1734032074569/Put/seqid=0 2024-12-12T19:34:35,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/11805f4864d6411da514daa9c9237e77 is 50, key is test_row_0/A:col10/1734032075545/Put/seqid=0 2024-12-12T19:34:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742315_1491 (size=13051) 2024-12-12T19:34:35,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742316_1492 (size=13051) 2024-12-12T19:34:35,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742317_1493 (size=14741) 2024-12-12T19:34:35,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/11805f4864d6411da514daa9c9237e77 2024-12-12T19:34:35,993 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/e19d4115a50c4529b9c1c0717d1035d3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e19d4115a50c4529b9c1c0717d1035d3 2024-12-12T19:34:36,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/fcbef3e3fd414666b5c687712b73b536 is 50, key is test_row_0/B:col10/1734032075545/Put/seqid=0 2024-12-12T19:34:36,036 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into e19d4115a50c4529b9c1c0717d1035d3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:36,036 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:36,036 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=13, startTime=1734032075840; duration=0sec 2024-12-12T19:34:36,036 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:36,036 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:36,036 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:36,043 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:36,043 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:36,043 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,043 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0d794d035bc64570a6d4803d8594f797, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/5ff7eae19850404c9959e02f11c84fc0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/63b27405146d47d89656f41f5e3f9a83] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.7 K 2024-12-12T19:34:36,047 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d794d035bc64570a6d4803d8594f797, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734032073156 2024-12-12T19:34:36,048 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ff7eae19850404c9959e02f11c84fc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1734032073550 2024-12-12T19:34:36,048 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 63b27405146d47d89656f41f5e3f9a83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1734032074564 2024-12-12T19:34:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 
is added to blk_1073742318_1494 (size=12301) 2024-12-12T19:34:36,062 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:36,063 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/e952082fb1604ef9ab9120afe91b5016 is 50, key is test_row_0/C:col10/1734032074569/Put/seqid=0 2024-12-12T19:34:36,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/fcbef3e3fd414666b5c687712b73b536 2024-12-12T19:34:36,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/918ed26a8bbb48dea6e47ec00ab2ed29 is 50, key is test_row_0/C:col10/1734032075545/Put/seqid=0 2024-12-12T19:34:36,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T19:34:36,078 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-12T19:34:36,081 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:36,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-12T19:34:36,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:36,087 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:36,089 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:36,089 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:36,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032136080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742319_1495 (size=13051) 2024-12-12T19:34:36,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742320_1496 (size=12301) 2024-12-12T19:34:36,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/918ed26a8bbb48dea6e47ec00ab2ed29 2024-12-12T19:34:36,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/11805f4864d6411da514daa9c9237e77 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/11805f4864d6411da514daa9c9237e77 2024-12-12T19:34:36,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:36,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032136197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/11805f4864d6411da514daa9c9237e77, entries=200, sequenceid=337, filesize=14.4 K 2024-12-12T19:34:36,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/fcbef3e3fd414666b5c687712b73b536 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fcbef3e3fd414666b5c687712b73b536 2024-12-12T19:34:36,240 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fcbef3e3fd414666b5c687712b73b536, entries=150, sequenceid=337, filesize=12.0 K 2024-12-12T19:34:36,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:36,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
as already flushing 2024-12-12T19:34:36,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/918ed26a8bbb48dea6e47ec00ab2ed29 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/918ed26a8bbb48dea6e47ec00ab2ed29 2024-12-12T19:34:36,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/918ed26a8bbb48dea6e47ec00ab2ed29, entries=150, sequenceid=337, filesize=12.0 K 2024-12-12T19:34:36,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2250407ef6b64ed659c4133a7c4d89c in 401ms, sequenceid=337, compaction requested=false 2024-12-12T19:34:36,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:36,393 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/19eb79fbd438420697202b0b6e8e06ab as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/19eb79fbd438420697202b0b6e8e06ab 2024-12-12T19:34:36,403 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 19eb79fbd438420697202b0b6e8e06ab(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:36,403 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:36,403 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=13, startTime=1734032075840; duration=0sec 2024-12-12T19:34:36,403 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:36,403 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:36,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:34:36,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:36,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:36,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:36,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:36,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:36,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:36,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:36,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
as already flushing 2024-12-12T19:34:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/92804db83a6545dc990ffe7fa712d61e is 50, key is test_row_0/A:col10/1734032076067/Put/seqid=0 2024-12-12T19:34:36,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742321_1497 (size=14741) 2024-12-12T19:34:36,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/92804db83a6545dc990ffe7fa712d61e 2024-12-12T19:34:36,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032136475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/6bed93737cf043ed8146333712061387 is 50, key is test_row_0/B:col10/1734032076067/Put/seqid=0 2024-12-12T19:34:36,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742322_1498 (size=12301) 2024-12-12T19:34:36,517 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/e952082fb1604ef9ab9120afe91b5016 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/e952082fb1604ef9ab9120afe91b5016 2024-12-12T19:34:36,520 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into e952082fb1604ef9ab9120afe91b5016(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:36,520 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:36,520 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=13, startTime=1734032075840; duration=0sec 2024-12-12T19:34:36,520 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:36,520 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:36,568 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:36,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:36,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:36,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032136578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:36,727 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:36,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:36,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:36,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032136731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032136735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:36,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032136784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,889 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:36,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:36,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:36,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:36,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:36,913 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/6bed93737cf043ed8146333712061387 2024-12-12T19:34:36,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/4e602fdf11854336b4fd6d0d6fb9bc95 is 50, key is test_row_0/C:col10/1734032076067/Put/seqid=0 2024-12-12T19:34:36,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742323_1499 (size=12301) 2024-12-12T19:34:37,044 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032137088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:37,198 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/4e602fdf11854336b4fd6d0d6fb9bc95 2024-12-12T19:34:37,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:37,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:37,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:37,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/92804db83a6545dc990ffe7fa712d61e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/92804db83a6545dc990ffe7fa712d61e 2024-12-12T19:34:37,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/92804db83a6545dc990ffe7fa712d61e, entries=200, sequenceid=364, filesize=14.4 K 2024-12-12T19:34:37,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/6bed93737cf043ed8146333712061387 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/6bed93737cf043ed8146333712061387 2024-12-12T19:34:37,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/6bed93737cf043ed8146333712061387, entries=150, sequenceid=364, filesize=12.0 K 2024-12-12T19:34:37,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/4e602fdf11854336b4fd6d0d6fb9bc95 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4e602fdf11854336b4fd6d0d6fb9bc95 2024-12-12T19:34:37,439 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4e602fdf11854336b4fd6d0d6fb9bc95, entries=150, sequenceid=364, filesize=12.0 K 2024-12-12T19:34:37,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2250407ef6b64ed659c4133a7c4d89c in 1033ms, sequenceid=364, compaction requested=true 2024-12-12T19:34:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:37,446 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:37,446 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:37,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:37,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:37,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:37,450 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42533 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:37,450 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:37,450 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,451 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e19d4115a50c4529b9c1c0717d1035d3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/11805f4864d6411da514daa9c9237e77, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/92804db83a6545dc990ffe7fa712d61e] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=41.5 K 2024-12-12T19:34:37,454 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:37,454 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:37,454 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:37,454 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/19eb79fbd438420697202b0b6e8e06ab, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fcbef3e3fd414666b5c687712b73b536, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/6bed93737cf043ed8146333712061387] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.8 K 2024-12-12T19:34:37,455 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e19d4115a50c4529b9c1c0717d1035d3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1734032074564 2024-12-12T19:34:37,455 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 19eb79fbd438420697202b0b6e8e06ab, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1734032074564 2024-12-12T19:34:37,455 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11805f4864d6411da514daa9c9237e77, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1734032075499 2024-12-12T19:34:37,455 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92804db83a6545dc990ffe7fa712d61e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734032076000 2024-12-12T19:34:37,455 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting fcbef3e3fd414666b5c687712b73b536, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1734032075545 2024-12-12T19:34:37,456 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bed93737cf043ed8146333712061387, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734032076067 2024-12-12T19:34:37,496 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:37,496 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/cede96dd21374119a919eb085718da8c is 50, key is test_row_0/A:col10/1734032076067/Put/seqid=0 2024-12-12T19:34:37,504 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#421 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:37,504 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/ccd8f040d33344fe8924fe4625f42bb5 is 50, key is test_row_0/B:col10/1734032076067/Put/seqid=0 2024-12-12T19:34:37,533 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T19:34:37,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,535 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:37,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:37,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:37,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:37,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:37,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:37,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:37,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742324_1500 (size=13153) 2024-12-12T19:34:37,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742325_1501 (size=13153) 2024-12-12T19:34:37,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/6d2e2acd6fea4d79960adcc07ca6470c is 50, key is test_row_0/A:col10/1734032076472/Put/seqid=0 2024-12-12T19:34:37,573 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/cede96dd21374119a919eb085718da8c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cede96dd21374119a919eb085718da8c 2024-12-12T19:34:37,583 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/ccd8f040d33344fe8924fe4625f42bb5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/ccd8f040d33344fe8924fe4625f42bb5 2024-12-12T19:34:37,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742326_1502 (size=12301) 2024-12-12T19:34:37,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:37,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:37,612 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into ccd8f040d33344fe8924fe4625f42bb5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:37,612 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:37,613 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=13, startTime=1734032077446; duration=0sec 2024-12-12T19:34:37,615 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:37,615 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:37,615 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:37,620 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into cede96dd21374119a919eb085718da8c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:37,620 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:37,620 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=13, startTime=1734032077446; duration=0sec 2024-12-12T19:34:37,620 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:37,620 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:37,622 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:37,622 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:37,622 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:37,622 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/e952082fb1604ef9ab9120afe91b5016, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/918ed26a8bbb48dea6e47ec00ab2ed29, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4e602fdf11854336b4fd6d0d6fb9bc95] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.8 K 2024-12-12T19:34:37,623 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e952082fb1604ef9ab9120afe91b5016, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1734032074564 2024-12-12T19:34:37,623 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 918ed26a8bbb48dea6e47ec00ab2ed29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1734032075545 2024-12-12T19:34:37,623 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e602fdf11854336b4fd6d0d6fb9bc95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734032076067 2024-12-12T19:34:37,643 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:37,644 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/39ad26a510424435a2d497674a0fb25f is 50, key is test_row_0/C:col10/1734032076067/Put/seqid=0 2024-12-12T19:34:37,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742327_1503 (size=13153) 2024-12-12T19:34:37,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032137799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032137808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032137814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032137918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032137921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:37,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032137928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,003 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/6d2e2acd6fea4d79960adcc07ca6470c 2024-12-12T19:34:38,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/caa7d363d323424fab07fdd683d57e9a is 50, key is test_row_0/B:col10/1734032076472/Put/seqid=0 2024-12-12T19:34:38,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742328_1504 (size=12301) 2024-12-12T19:34:38,083 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/caa7d363d323424fab07fdd683d57e9a 2024-12-12T19:34:38,099 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/39ad26a510424435a2d497674a0fb25f as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/39ad26a510424435a2d497674a0fb25f 2024-12-12T19:34:38,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032138128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032138128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/ef6f5f20080a46a7948ecc244b955a79 is 50, key is test_row_0/C:col10/1734032076472/Put/seqid=0 2024-12-12T19:34:38,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032138137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,139 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into 39ad26a510424435a2d497674a0fb25f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:38,139 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:38,139 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=13, startTime=1734032077447; duration=0sec 2024-12-12T19:34:38,139 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:38,139 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:38,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742329_1505 (size=12301) 2024-12-12T19:34:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:38,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032138431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032138434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032138446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,584 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/ef6f5f20080a46a7948ecc244b955a79 2024-12-12T19:34:38,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/6d2e2acd6fea4d79960adcc07ca6470c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6d2e2acd6fea4d79960adcc07ca6470c 2024-12-12T19:34:38,654 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6d2e2acd6fea4d79960adcc07ca6470c, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T19:34:38,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/caa7d363d323424fab07fdd683d57e9a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/caa7d363d323424fab07fdd683d57e9a 2024-12-12T19:34:38,666 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/caa7d363d323424fab07fdd683d57e9a, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T19:34:38,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/ef6f5f20080a46a7948ecc244b955a79 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/ef6f5f20080a46a7948ecc244b955a79 2024-12-12T19:34:38,700 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/ef6f5f20080a46a7948ecc244b955a79, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T19:34:38,702 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2250407ef6b64ed659c4133a7c4d89c in 1167ms, sequenceid=376, compaction requested=false 2024-12-12T19:34:38,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:38,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:38,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-12T19:34:38,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-12T19:34:38,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-12T19:34:38,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6230 sec 2024-12-12T19:34:38,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.6330 sec 2024-12-12T19:34:38,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:38,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:34:38,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:38,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:38,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:38,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:38,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:38,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:38,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/d3e5a83f97a04d48965f41489e53ca04 is 50, key is test_row_0/A:col10/1734032078765/Put/seqid=0 2024-12-12T19:34:38,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742330_1506 (size=12301) 2024-12-12T19:34:38,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/d3e5a83f97a04d48965f41489e53ca04 2024-12-12T19:34:38,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032138821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032138823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/c61c2baad9a8449a90560d9b4fe5bbe5 is 50, key is test_row_0/B:col10/1734032078765/Put/seqid=0 2024-12-12T19:34:38,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742331_1507 (size=12301) 2024-12-12T19:34:38,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/c61c2baad9a8449a90560d9b4fe5bbe5 2024-12-12T19:34:38,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/c4c364207d614c65855b9115ef177f03 is 50, key is test_row_0/C:col10/1734032078765/Put/seqid=0 2024-12-12T19:34:38,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032138940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742332_1508 (size=12301) 2024-12-12T19:34:38,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032138940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/c4c364207d614c65855b9115ef177f03 2024-12-12T19:34:38,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032138941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/d3e5a83f97a04d48965f41489e53ca04 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d3e5a83f97a04d48965f41489e53ca04 2024-12-12T19:34:38,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032138942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d3e5a83f97a04d48965f41489e53ca04, entries=150, sequenceid=406, filesize=12.0 K 2024-12-12T19:34:38,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/c61c2baad9a8449a90560d9b4fe5bbe5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c61c2baad9a8449a90560d9b4fe5bbe5 2024-12-12T19:34:38,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032138959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:38,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c61c2baad9a8449a90560d9b4fe5bbe5, entries=150, sequenceid=406, filesize=12.0 K 2024-12-12T19:34:38,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/c4c364207d614c65855b9115ef177f03 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/c4c364207d614c65855b9115ef177f03 2024-12-12T19:34:38,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/c4c364207d614c65855b9115ef177f03, entries=150, sequenceid=406, filesize=12.0 K 2024-12-12T19:34:38,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for d2250407ef6b64ed659c4133a7c4d89c in 208ms, sequenceid=406, compaction requested=true 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under 
compaction store size is 3 2024-12-12T19:34:38,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:34:38,975 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:38,977 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:38,978 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:38,978 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:38,978 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:38,978 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:38,978 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:38,978 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/39ad26a510424435a2d497674a0fb25f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/ef6f5f20080a46a7948ecc244b955a79, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/c4c364207d614c65855b9115ef177f03] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.9 K 2024-12-12T19:34:38,978 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:38,979 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cede96dd21374119a919eb085718da8c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6d2e2acd6fea4d79960adcc07ca6470c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d3e5a83f97a04d48965f41489e53ca04] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.9 K 2024-12-12T19:34:38,979 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39ad26a510424435a2d497674a0fb25f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734032076067 2024-12-12T19:34:38,979 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting cede96dd21374119a919eb085718da8c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734032076067 2024-12-12T19:34:38,980 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef6f5f20080a46a7948ecc244b955a79, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734032076449 2024-12-12T19:34:38,981 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d2e2acd6fea4d79960adcc07ca6470c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734032076449 2024-12-12T19:34:38,985 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting d3e5a83f97a04d48965f41489e53ca04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1734032077797 2024-12-12T19:34:38,985 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c364207d614c65855b9115ef177f03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1734032077797 2024-12-12T19:34:39,001 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:39,002 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/6c613e5fe09a4c42868b6ff55903626d is 50, key is test_row_0/C:col10/1734032078765/Put/seqid=0 2024-12-12T19:34:39,004 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#430 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:39,004 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/ef26caac81f34faba465219e45e53c81 is 50, key is test_row_0/A:col10/1734032078765/Put/seqid=0 2024-12-12T19:34:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742333_1509 (size=13255) 2024-12-12T19:34:39,034 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/6c613e5fe09a4c42868b6ff55903626d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6c613e5fe09a4c42868b6ff55903626d 2024-12-12T19:34:39,044 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into 6c613e5fe09a4c42868b6ff55903626d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:39,044 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:39,044 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=13, startTime=1734032078974; duration=0sec 2024-12-12T19:34:39,044 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:39,044 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:39,044 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:39,045 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:39,045 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:39,045 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:39,045 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/ccd8f040d33344fe8924fe4625f42bb5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/caa7d363d323424fab07fdd683d57e9a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c61c2baad9a8449a90560d9b4fe5bbe5] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=36.9 K 2024-12-12T19:34:39,045 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccd8f040d33344fe8924fe4625f42bb5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734032076067 2024-12-12T19:34:39,046 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting caa7d363d323424fab07fdd683d57e9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734032076449 2024-12-12T19:34:39,046 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c61c2baad9a8449a90560d9b4fe5bbe5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1734032077797 2024-12-12T19:34:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742334_1510 (size=13255) 2024-12-12T19:34:39,057 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:39,057 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/7d589798aad24cea94941a0c23f17bd9 is 50, key is test_row_0/B:col10/1734032078765/Put/seqid=0 2024-12-12T19:34:39,064 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/ef26caac81f34faba465219e45e53c81 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/ef26caac81f34faba465219e45e53c81 2024-12-12T19:34:39,070 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into ef26caac81f34faba465219e45e53c81(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:39,070 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:39,070 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=13, startTime=1734032078974; duration=0sec 2024-12-12T19:34:39,070 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:39,070 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:39,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742335_1511 (size=13255) 2024-12-12T19:34:39,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:39,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:39,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/652983fa6d44478087aca86c051ee9a3 is 50, key is test_row_0/A:col10/1734032078818/Put/seqid=0 2024-12-12T19:34:39,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742336_1512 (size=12301) 2024-12-12T19:34:39,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/652983fa6d44478087aca86c051ee9a3 2024-12-12T19:34:39,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/e2e4ee2d413e4485a5d39b6095bb4bbb is 50, key is test_row_0/B:col10/1734032078818/Put/seqid=0 
2024-12-12T19:34:39,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032139263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032139266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742337_1513 (size=12301) 2024-12-12T19:34:39,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/e2e4ee2d413e4485a5d39b6095bb4bbb 2024-12-12T19:34:39,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/b7f9e8d0f2524b2a9c34af6471a31dd2 is 50, key is test_row_0/C:col10/1734032078818/Put/seqid=0 2024-12-12T19:34:39,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032139368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032139374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742338_1514 (size=12301) 2024-12-12T19:34:39,400 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/b7f9e8d0f2524b2a9c34af6471a31dd2 2024-12-12T19:34:39,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/652983fa6d44478087aca86c051ee9a3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/652983fa6d44478087aca86c051ee9a3 2024-12-12T19:34:39,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/652983fa6d44478087aca86c051ee9a3, entries=150, sequenceid=419, filesize=12.0 K 2024-12-12T19:34:39,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/e2e4ee2d413e4485a5d39b6095bb4bbb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e2e4ee2d413e4485a5d39b6095bb4bbb 2024-12-12T19:34:39,526 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/7d589798aad24cea94941a0c23f17bd9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7d589798aad24cea94941a0c23f17bd9 2024-12-12T19:34:39,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e2e4ee2d413e4485a5d39b6095bb4bbb, entries=150, sequenceid=419, filesize=12.0 K 2024-12-12T19:34:39,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/b7f9e8d0f2524b2a9c34af6471a31dd2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b7f9e8d0f2524b2a9c34af6471a31dd2 2024-12-12T19:34:39,558 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 7d589798aad24cea94941a0c23f17bd9(size=12.9 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:39,559 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:39,559 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=13, startTime=1734032078974; duration=0sec 2024-12-12T19:34:39,559 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:39,559 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:39,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b7f9e8d0f2524b2a9c34af6471a31dd2, entries=150, sequenceid=419, filesize=12.0 K 2024-12-12T19:34:39,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2250407ef6b64ed659c4133a7c4d89c in 436ms, sequenceid=419, compaction requested=false 2024-12-12T19:34:39,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:39,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:39,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:34:39,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:39,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:39,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:39,600 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:39,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:39,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:39,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/dee8e70e8ec241249282c9c7e3e5d18b is 50, key is test_row_0/A:col10/1734032079596/Put/seqid=0 2024-12-12T19:34:39,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032139648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032139649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742339_1515 (size=12301) 2024-12-12T19:34:39,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/dee8e70e8ec241249282c9c7e3e5d18b 2024-12-12T19:34:39,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/753b3d936a64493ca25931ca4f7827fa is 50, key is test_row_0/B:col10/1734032079596/Put/seqid=0 2024-12-12T19:34:39,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742340_1516 (size=12301) 2024-12-12T19:34:39,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032139759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/753b3d936a64493ca25931ca4f7827fa 2024-12-12T19:34:39,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032139763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/d74636c366674ce1b0e627ef7b4439a6 is 50, key is test_row_0/C:col10/1734032079596/Put/seqid=0 2024-12-12T19:34:39,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742341_1517 (size=12301) 2024-12-12T19:34:39,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51380 deadline: 1734032139953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51454 deadline: 1734032139952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032139965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51370 deadline: 1734032139967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:39,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032139987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T19:34:40,200 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-12T19:34:40,207 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:40,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-12T19:34:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T19:34:40,215 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:40,217 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:40,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:40,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at 
sequenceid=447 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/d74636c366674ce1b0e627ef7b4439a6 2024-12-12T19:34:40,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/dee8e70e8ec241249282c9c7e3e5d18b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/dee8e70e8ec241249282c9c7e3e5d18b 2024-12-12T19:34:40,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/dee8e70e8ec241249282c9c7e3e5d18b, entries=150, sequenceid=447, filesize=12.0 K 2024-12-12T19:34:40,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/753b3d936a64493ca25931ca4f7827fa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/753b3d936a64493ca25931ca4f7827fa 2024-12-12T19:34:40,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032140288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:40,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/753b3d936a64493ca25931ca4f7827fa, entries=150, sequenceid=447, filesize=12.0 K 2024-12-12T19:34:40,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/d74636c366674ce1b0e627ef7b4439a6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d74636c366674ce1b0e627ef7b4439a6 2024-12-12T19:34:40,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:40,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032140301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T19:34:40,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d74636c366674ce1b0e627ef7b4439a6, entries=150, sequenceid=447, filesize=12.0 K 2024-12-12T19:34:40,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for d2250407ef6b64ed659c4133a7c4d89c in 742ms, sequenceid=447, compaction requested=true 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:40,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T19:34:40,343 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:40,345 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
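The repeated RegionTooBusyException warnings above are write backpressure: HRegion.checkResources rejects new puts while the region's memstore is over its blocking limit (512.0 K here) until the MemStoreFlusher drains it. The default HBase client treats this as a retriable exception and retries internally; the sketch below only illustrates an explicit application-level retry with backoff, under the assumption that the exception actually surfaces to the caller (for example with client retries tuned down).

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BusyRegionRetry {
      /**
       * Retries a single put when the region rejects it with RegionTooBusyException,
       * backing off so the flusher has time to bring the memstore back under its limit.
       */
      static void putWithRetry(Table table, Put put, int maxAttempts, long backoffMs)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e;
            }
            Thread.sleep(backoffMs * attempt); // simple linear backoff
          }
        }
      }
    }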
2024-12-12T19:34:40,351 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:40,351 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:40,351 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:40,351 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/ef26caac81f34faba465219e45e53c81, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/652983fa6d44478087aca86c051ee9a3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/dee8e70e8ec241249282c9c7e3e5d18b] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=37.0 K 2024-12-12T19:34:40,355 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ef26caac81f34faba465219e45e53c81, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1734032077797 2024-12-12T19:34:40,358 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:40,358 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:40,359 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
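The selection lines above ("3 eligible, 16 blocking", totalSize=37.0 K, and the 50.00 MB/second throughput limit) are governed by region-server settings that normally live in hbase-site.xml. The snippet below only names those keys with their usual defaults as a hedged illustration; the exact values used by this test run are not shown in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        // Region-server settings; building a Configuration here is only to name the keys.
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // "16 blocking" in the selection lines above: past this many store files,
        // further flushes are delayed until compaction reduces the count.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // PressureAwareThroughputController bounds; the 50.00 MB/second limit logged
        // above is consistent with the lower bound when compaction pressure is zero.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction.min=" + conf.get("hbase.hstore.compaction.min"));
      }
    }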
2024-12-12T19:34:40,359 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6c613e5fe09a4c42868b6ff55903626d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b7f9e8d0f2524b2a9c34af6471a31dd2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d74636c366674ce1b0e627ef7b4439a6] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=37.0 K 2024-12-12T19:34:40,363 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c613e5fe09a4c42868b6ff55903626d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1734032077797 2024-12-12T19:34:40,363 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 652983fa6d44478087aca86c051ee9a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1734032078818 2024-12-12T19:34:40,364 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7f9e8d0f2524b2a9c34af6471a31dd2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1734032078818 2024-12-12T19:34:40,364 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting dee8e70e8ec241249282c9c7e3e5d18b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1734032079265 2024-12-12T19:34:40,365 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d74636c366674ce1b0e627ef7b4439a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1734032079265 2024-12-12T19:34:40,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:40,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
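The pid=134/135 procedure lines above (FlushTableProcedure on the master, then FlushRegionCallable executed on the region server) are the server side of an administrative flush; the earlier HBaseAdmin$TableFuture line ("Operation: FLUSH ... procId: 132 completed") shows the test client waiting on such a request. A hedged sketch of issuing that flush from a client, assuming only a reachable cluster configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table, which drives the same
          // FlushTableProcedure / FlushRegionProcedure chain recorded in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }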
2024-12-12T19:34:40,376 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:40,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:40,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/81cd4a49a7f649b1b2f7ea0f28d6d040 is 50, key is test_row_0/A:col10/1734032079636/Put/seqid=0 2024-12-12T19:34:40,410 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:40,411 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a1d9322eeb0b4f0486c73cd7f2b9acf7 is 50, key is test_row_0/A:col10/1734032079596/Put/seqid=0 2024-12-12T19:34:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742342_1518 (size=12301) 2024-12-12T19:34:40,417 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#C#compaction#440 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:40,418 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/fe90d85a60aa4d74b6dc7dbf326c62b0 is 50, key is test_row_0/C:col10/1734032079596/Put/seqid=0 2024-12-12T19:34:40,418 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/81cd4a49a7f649b1b2f7ea0f28d6d040 2024-12-12T19:34:40,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742343_1519 (size=13357) 2024-12-12T19:34:40,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742344_1520 (size=13357) 2024-12-12T19:34:40,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/c4ebf62a1cc44613a008a14ef5c66ce6 is 50, key is test_row_0/B:col10/1734032079636/Put/seqid=0 2024-12-12T19:34:40,475 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/a1d9322eeb0b4f0486c73cd7f2b9acf7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a1d9322eeb0b4f0486c73cd7f2b9acf7 2024-12-12T19:34:40,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742345_1521 (size=12301) 2024-12-12T19:34:40,494 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/c4ebf62a1cc44613a008a14ef5c66ce6 2024-12-12T19:34:40,499 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into a1d9322eeb0b4f0486c73cd7f2b9acf7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
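The CompactingMemStore / CompactionPipeline lines above ("FLUSHING TO DISK", "Swapping pipeline suffix") suggest the test table's families run with the in-memory compaction memstore. How such a family can be declared is sketched below; the BASIC policy value and the builder-based schema setup are assumptions for illustration, not read from the test.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSchema {
      public static void main(String[] args) {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] {"A", "B", "C"}) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              // CompactingMemStore is selected by the in-memory compaction policy;
              // BASIC is one possible value (NONE, BASIC, EAGER, ADAPTIVE exist).
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build();
          table.setColumnFamily(cf);
        }
        TableDescriptor descriptor = table.build();
        // Pass `descriptor` to Admin.createTable(...) on a live cluster to apply it.
        System.out.println(descriptor);
      }
    }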
2024-12-12T19:34:40,499 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:40,499 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=13, startTime=1734032080342; duration=0sec 2024-12-12T19:34:40,499 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:40,499 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:40,499 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T19:34:40,522 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:40,522 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/fe90d85a60aa4d74b6dc7dbf326c62b0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/fe90d85a60aa4d74b6dc7dbf326c62b0 2024-12-12T19:34:40,522 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:40,522 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
2024-12-12T19:34:40,522 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7d589798aad24cea94941a0c23f17bd9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e2e4ee2d413e4485a5d39b6095bb4bbb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/753b3d936a64493ca25931ca4f7827fa] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=37.0 K 2024-12-12T19:34:40,527 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d589798aad24cea94941a0c23f17bd9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1734032077797 2024-12-12T19:34:40,532 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e2e4ee2d413e4485a5d39b6095bb4bbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1734032078818 2024-12-12T19:34:40,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/06f406c40fa74a9ba55bd5d236a18a94 is 50, key is test_row_0/C:col10/1734032079636/Put/seqid=0 2024-12-12T19:34:40,539 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 753b3d936a64493ca25931ca4f7827fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1734032079265 2024-12-12T19:34:40,548 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into fe90d85a60aa4d74b6dc7dbf326c62b0(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:34:40,548 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:40,548 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=13, startTime=1734032080342; duration=0sec 2024-12-12T19:34:40,548 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:40,548 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:40,564 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:40,564 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/dc189028329647ada69c73e833203341 is 50, key is test_row_0/B:col10/1734032079596/Put/seqid=0 2024-12-12T19:34:40,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742346_1522 (size=12301) 2024-12-12T19:34:40,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742347_1523 (size=13357) 2024-12-12T19:34:40,651 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/dc189028329647ada69c73e833203341 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dc189028329647ada69c73e833203341 2024-12-12T19:34:40,666 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into dc189028329647ada69c73e833203341(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
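The PressureAwareThroughputController entries above report compaction throughput against a total limit of 50.00 MB/second. As a rough illustration only, the sketch below shows how such a limit is typically expressed through HBase Configuration properties; the property names and byte values are assumptions made for illustration and are not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
    public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Assumed knobs for the pressure-aware compaction throughput controller;
        // 50 MB/s mirrors the "total limit" printed in the log above.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

        System.out.println("compaction throughput lower bound (bytes/s): "
                + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1L));
    }
}

Settings like these would normally live in hbase-site.xml on the region servers rather than being set programmatically; the Java form is used here only to keep the example self-contained.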
2024-12-12T19:34:40,666 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:40,666 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=13, startTime=1734032080342; duration=0sec 2024-12-12T19:34:40,666 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:40,666 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:40,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T19:34:40,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. as already flushing 2024-12-12T19:34:40,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:40,972 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=456 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/06f406c40fa74a9ba55bd5d236a18a94 2024-12-12T19:34:40,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:40,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032140971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:40,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:40,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032140970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:40,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/81cd4a49a7f649b1b2f7ea0f28d6d040 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/81cd4a49a7f649b1b2f7ea0f28d6d040 2024-12-12T19:34:40,984 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/81cd4a49a7f649b1b2f7ea0f28d6d040, entries=150, sequenceid=456, filesize=12.0 K 2024-12-12T19:34:40,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/c4ebf62a1cc44613a008a14ef5c66ce6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c4ebf62a1cc44613a008a14ef5c66ce6 2024-12-12T19:34:40,997 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c4ebf62a1cc44613a008a14ef5c66ce6, entries=150, sequenceid=456, filesize=12.0 K 2024-12-12T19:34:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/06f406c40fa74a9ba55bd5d236a18a94 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/06f406c40fa74a9ba55bd5d236a18a94 2024-12-12T19:34:41,011 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/06f406c40fa74a9ba55bd5d236a18a94, entries=150, sequenceid=456, filesize=12.0 K 2024-12-12T19:34:41,012 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for d2250407ef6b64ed659c4133a7c4d89c in 636ms, sequenceid=456, compaction requested=false 2024-12-12T19:34:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-12T19:34:41,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-12T19:34:41,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-12T19:34:41,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 796 msec 2024-12-12T19:34:41,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 807 msec 2024-12-12T19:34:41,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:41,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-12T19:34:41,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:41,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:41,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:41,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:41,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:41,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:41,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/66fcb805c0844050a73642d897c931af is 50, key is test_row_0/A:col10/1734032081097/Put/seqid=0 
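The repeated RegionTooBusyException warnings above are thrown by HRegion.checkResources while the region's memstore is over its 512.0 K blocking limit, so the incoming Mutate calls are rejected until the flush drains the memstore. The HBase client normally retries this exception internally; the following is only a minimal sketch, assuming an application that wants an explicit bounded backoff around a single Put (the table, family, and row names are taken from the log; the backoff values are assumptions).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;                      // assumed initial backoff
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);                    // may throw RegionTooBusyException
                    break;                             // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);           // region over memstore limit: wait
                    backoffMs *= 2;                    // and retry with a larger delay
                }
            }
        }
    }
}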
2024-12-12T19:34:41,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:41,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032141142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:41,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:41,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032141148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:41,180 DEBUG [Thread-1928 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:52216 2024-12-12T19:34:41,180 DEBUG [Thread-1928 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,181 DEBUG [Thread-1924 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17e5a47d to 127.0.0.1:52216 2024-12-12T19:34:41,181 DEBUG [Thread-1922 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e96b8ad to 127.0.0.1:52216 2024-12-12T19:34:41,181 DEBUG [Thread-1922 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,181 DEBUG [Thread-1924 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,183 DEBUG [Thread-1920 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc5e114 to 127.0.0.1:52216 2024-12-12T19:34:41,183 DEBUG [Thread-1920 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742348_1524 (size=12301) 2024-12-12T19:34:41,188 DEBUG [Thread-1926 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:52216 2024-12-12T19:34:41,188 DEBUG [Thread-1926 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:41,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032141250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:41,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032141253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:41,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T19:34:41,315 INFO [Thread-1919 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-12T19:34:41,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:41,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51396 deadline: 1734032141453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:41,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51440 deadline: 1734032141454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:41,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/66fcb805c0844050a73642d897c931af 2024-12-12T19:34:41,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/f9bec7da9a7544c4afdeb590e9a0d9b9 is 50, key is test_row_0/B:col10/1734032081097/Put/seqid=0 2024-12-12T19:34:41,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742349_1525 (size=12301) 2024-12-12T19:34:41,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/f9bec7da9a7544c4afdeb590e9a0d9b9 2024-12-12T19:34:41,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0a3c4749a09d4fd2a50ab213a8f1894c is 50, key is test_row_0/C:col10/1734032081097/Put/seqid=0 2024-12-12T19:34:41,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742350_1526 (size=12301) 2024-12-12T19:34:41,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0a3c4749a09d4fd2a50ab213a8f1894c 2024-12-12T19:34:41,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/66fcb805c0844050a73642d897c931af as 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/66fcb805c0844050a73642d897c931af 2024-12-12T19:34:41,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/66fcb805c0844050a73642d897c931af, entries=150, sequenceid=489, filesize=12.0 K 2024-12-12T19:34:41,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/f9bec7da9a7544c4afdeb590e9a0d9b9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f9bec7da9a7544c4afdeb590e9a0d9b9 2024-12-12T19:34:41,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f9bec7da9a7544c4afdeb590e9a0d9b9, entries=150, sequenceid=489, filesize=12.0 K 2024-12-12T19:34:41,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/0a3c4749a09d4fd2a50ab213a8f1894c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0a3c4749a09d4fd2a50ab213a8f1894c 2024-12-12T19:34:41,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0a3c4749a09d4fd2a50ab213a8f1894c, entries=150, sequenceid=489, filesize=12.0 K 2024-12-12T19:34:41,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for d2250407ef6b64ed659c4133a7c4d89c in 533ms, sequenceid=489, compaction requested=true 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:41,632 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:41,632 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2250407ef6b64ed659c4133a7c4d89c:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:41,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:41,632 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:41,632 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:41,632 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/A is initiating minor compaction (all files) 2024-12-12T19:34:41,632 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/B is initiating minor compaction (all files) 2024-12-12T19:34:41,633 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/A in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:41,633 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/B in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
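The pid=134/135 procedure entries earlier in this run show a client-requested flush of TestAcidGuarantees completing, after which the MemStoreFlusher and CompactSplit threads queue the follow-up compactions seen here. A minimal sketch, assuming the standard Admin API, of how such a flush (plus an explicit compaction request) is issued from a client:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);        // in this run the flush surfaced as FlushTableProcedure pid=134
            admin.majorCompact(table); // asks the region servers to rewrite all store files per store
        }
    }
}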
2024-12-12T19:34:41,633 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dc189028329647ada69c73e833203341, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c4ebf62a1cc44613a008a14ef5c66ce6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f9bec7da9a7544c4afdeb590e9a0d9b9] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=37.1 K 2024-12-12T19:34:41,633 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a1d9322eeb0b4f0486c73cd7f2b9acf7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/81cd4a49a7f649b1b2f7ea0f28d6d040, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/66fcb805c0844050a73642d897c931af] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=37.1 K 2024-12-12T19:34:41,633 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1d9322eeb0b4f0486c73cd7f2b9acf7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1734032079265 2024-12-12T19:34:41,633 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting dc189028329647ada69c73e833203341, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1734032079265 2024-12-12T19:34:41,633 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81cd4a49a7f649b1b2f7ea0f28d6d040, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1734032079611 2024-12-12T19:34:41,633 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c4ebf62a1cc44613a008a14ef5c66ce6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1734032079611 2024-12-12T19:34:41,633 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66fcb805c0844050a73642d897c931af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734032080922 2024-12-12T19:34:41,633 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f9bec7da9a7544c4afdeb590e9a0d9b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734032080922 2024-12-12T19:34:41,638 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#A#compaction#447 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:41,638 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2250407ef6b64ed659c4133a7c4d89c#B#compaction#448 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:41,639 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/0a51c2b3410a4bad8c328aa068232d91 is 50, key is test_row_0/A:col10/1734032081097/Put/seqid=0 2024-12-12T19:34:41,639 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/3dcb94aba613497a839d2d95902fd4bb is 50, key is test_row_0/B:col10/1734032081097/Put/seqid=0 2024-12-12T19:34:41,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742351_1527 (size=13459) 2024-12-12T19:34:41,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742352_1528 (size=13459) 2024-12-12T19:34:41,654 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/3dcb94aba613497a839d2d95902fd4bb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/3dcb94aba613497a839d2d95902fd4bb 2024-12-12T19:34:41,657 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/B of d2250407ef6b64ed659c4133a7c4d89c into 3dcb94aba613497a839d2d95902fd4bb(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
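The SortedCompactionPolicy and ExploringCompactionPolicy lines above select 3 of 3 eligible store files, with 16 files listed as blocking. As a hedged illustration, thresholds like these are usually governed by store-level settings such as the following; the property names are assumptions about the standard HBase configuration, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed knobs: minimum/maximum files per minor compaction, and the
        // store-file count at which writes start blocking (16 matches the
        // "16 blocking" figure reported by the policy above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("blocking store files: "
                + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
    }
}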
2024-12-12T19:34:41,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:41,657 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/B, priority=13, startTime=1734032081632; duration=0sec 2024-12-12T19:34:41,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:41,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:B 2024-12-12T19:34:41,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:41,658 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:41,658 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): d2250407ef6b64ed659c4133a7c4d89c/C is initiating minor compaction (all files) 2024-12-12T19:34:41,658 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2250407ef6b64ed659c4133a7c4d89c/C in TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:41,658 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/fe90d85a60aa4d74b6dc7dbf326c62b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/06f406c40fa74a9ba55bd5d236a18a94, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0a3c4749a09d4fd2a50ab213a8f1894c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp, totalSize=37.1 K 2024-12-12T19:34:41,659 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting fe90d85a60aa4d74b6dc7dbf326c62b0, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1734032079265 2024-12-12T19:34:41,659 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 06f406c40fa74a9ba55bd5d236a18a94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=456, earliestPutTs=1734032079611 2024-12-12T19:34:41,659 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a3c4749a09d4fd2a50ab213a8f1894c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734032080922 2024-12-12T19:34:41,668 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d2250407ef6b64ed659c4133a7c4d89c#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:41,669 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/8608a0f841444cb08bf7307a551174e0 is 50, key is test_row_0/C:col10/1734032081097/Put/seqid=0 2024-12-12T19:34:41,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742353_1529 (size=13459) 2024-12-12T19:34:41,756 DEBUG [Thread-1911 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:52216 2024-12-12T19:34:41,756 DEBUG [Thread-1911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,757 DEBUG [Thread-1915 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:52216 2024-12-12T19:34:41,757 DEBUG [Thread-1915 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,971 DEBUG [Thread-1917 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x184771cf to 127.0.0.1:52216 2024-12-12T19:34:41,971 DEBUG [Thread-1917 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,975 DEBUG [Thread-1909 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:52216 2024-12-12T19:34:41,976 DEBUG [Thread-1909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:41,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2250407ef6b64ed659c4133a7c4d89c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:41,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=A 2024-12-12T19:34:41,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:41,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=B 2024-12-12T19:34:41,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:41,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2250407ef6b64ed659c4133a7c4d89c, store=C 2024-12-12T19:34:41,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:41,984 DEBUG [Thread-1913 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:52216 2024-12-12T19:34:41,984 DEBUG [Thread-1913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 118 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1393 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4179 rows 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1414 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4242 rows 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1436 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4308 rows 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1410 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4230 rows 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1399 2024-12-12T19:34:41,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4197 rows 2024-12-12T19:34:41,985 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:34:41,985 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:52216 2024-12-12T19:34:41,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:34:41,987 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T19:34:41,988 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T19:34:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T19:34:41,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/9f6774bc282a492d8cc52f799e304d06 is 50, key is test_row_0/A:col10/1734032081755/Put/seqid=0 2024-12-12T19:34:41,994 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032081994"}]},"ts":"1734032081994"} 2024-12-12T19:34:41,998 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T19:34:42,018 INFO [PEWorker-2 {}] 
procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T19:34:42,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742354_1530 (size=12301) 2024-12-12T19:34:42,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:34:42,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, UNASSIGN}] 2024-12-12T19:34:42,029 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, UNASSIGN 2024-12-12T19:34:42,029 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=d2250407ef6b64ed659c4133a7c4d89c, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:42,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=501 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/9f6774bc282a492d8cc52f799e304d06 2024-12-12T19:34:42,031 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:34:42,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; CloseRegionProcedure d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:34:42,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/09859cb689d44ae390db1234a13e325c is 50, key is test_row_0/B:col10/1734032081755/Put/seqid=0 2024-12-12T19:34:42,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742355_1531 (size=12301) 2024-12-12T19:34:42,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=501 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/09859cb689d44ae390db1234a13e325c 2024-12-12T19:34:42,072 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/0a51c2b3410a4bad8c328aa068232d91 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0a51c2b3410a4bad8c328aa068232d91 2024-12-12T19:34:42,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/458dd0b1103343f6b44b9d128e9a0ae4 is 50, key is test_row_0/C:col10/1734032081755/Put/seqid=0 2024-12-12T19:34:42,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742356_1532 (size=12301) 2024-12-12T19:34:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T19:34:42,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=501 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/458dd0b1103343f6b44b9d128e9a0ae4 2024-12-12T19:34:42,101 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/A of d2250407ef6b64ed659c4133a7c4d89c into 0a51c2b3410a4bad8c328aa068232d91(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:42,101 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:42,101 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/A, priority=13, startTime=1734032081632; duration=0sec 2024-12-12T19:34:42,101 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:42,101 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:A 2024-12-12T19:34:42,104 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/8608a0f841444cb08bf7307a551174e0 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8608a0f841444cb08bf7307a551174e0 2024-12-12T19:34:42,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/A/9f6774bc282a492d8cc52f799e304d06 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/9f6774bc282a492d8cc52f799e304d06 2024-12-12T19:34:42,126 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2250407ef6b64ed659c4133a7c4d89c/C of d2250407ef6b64ed659c4133a7c4d89c into 8608a0f841444cb08bf7307a551174e0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
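The flush and compaction entries above show the region server finishing in-flight work on region d2250407ef6b64ed659c4133a7c4d89c before the table goes offline: MemStoreFlusher.0 writes new HFiles for stores A, B and C under .tmp, and the CompactSplit threads commit the compacted files for A and C. For orientation only, here is a minimal sketch of how the same operations can be requested explicitly through the public Admin API. In this log they are triggered internally by the region server, not by client code, and the sketch assumes the cluster configuration (ZooKeeper quorum and so on) is supplied by an hbase-site.xml on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a reachable cluster; hbase-site.xml provides the ZooKeeper
        // quorum (127.0.0.1:52216 in this run).
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Flush pending memstore edits to new HFiles, the same work
            // MemStoreFlusher.0 reports above for stores A, B and C.
            admin.flush(table);
            // Request a major compaction; the rewritten store files replace the
            // old ones, which then become eligible for archiving.
            admin.majorCompact(table);
        }
    }
}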
2024-12-12T19:34:42,126 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:42,126 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c., storeName=d2250407ef6b64ed659c4133a7c4d89c/C, priority=13, startTime=1734032081632; duration=0sec 2024-12-12T19:34:42,126 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:42,126 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2250407ef6b64ed659c4133a7c4d89c:C 2024-12-12T19:34:42,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/9f6774bc282a492d8cc52f799e304d06, entries=150, sequenceid=501, filesize=12.0 K 2024-12-12T19:34:42,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/B/09859cb689d44ae390db1234a13e325c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/09859cb689d44ae390db1234a13e325c 2024-12-12T19:34:42,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/09859cb689d44ae390db1234a13e325c, entries=150, sequenceid=501, filesize=12.0 K 2024-12-12T19:34:42,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/.tmp/C/458dd0b1103343f6b44b9d128e9a0ae4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/458dd0b1103343f6b44b9d128e9a0ae4 2024-12-12T19:34:42,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/458dd0b1103343f6b44b9d128e9a0ae4, entries=150, sequenceid=501, filesize=12.0 K 2024-12-12T19:34:42,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for d2250407ef6b64ed659c4133a7c4d89c in 158ms, sequenceid=501, compaction requested=false 2024-12-12T19:34:42,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:42,184 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:42,184 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(124): Close 
d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:42,184 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:34:42,185 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1681): Closing d2250407ef6b64ed659c4133a7c4d89c, disabling compactions & flushes 2024-12-12T19:34:42,185 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:42,185 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:42,185 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. after waiting 0 ms 2024-12-12T19:34:42,185 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 2024-12-12T19:34:42,185 DEBUG [StoreCloser-TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cb801cf7329f4663a2df75d31c49d3a6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f992632eca724b32bda8ee77d2e2eac3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/19de58ffafe044d0a23b6b6b4193ccb3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/2663977b051a4c20ad280e9bda849502, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/081e8f7076cd49ea969083d7663b99fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d4bd14172f9b4e70a98a036e8c725799, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/026ed9a79eea4eafa81d650d1c90c21f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b9dfb625ff284093baf37de4ca332601, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1ae883b5a26749338f6e4e3cfc155eeb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1e74f191329949e7a1602a6b14e2e6c9, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b077b0b574aa4ad494f83d5239ad71c9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/af4657ac7bf742059c9d2660b52dd821, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6efcc3e5e3f945ac8b518b6bd0ad103c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e0dd1a1e7d75411e81a9b2ac36506aef, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/fb934219e8c9490e9449d2dbff70f42a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a085521d8c0c4f48a12e6f5c4b6d8cbe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f03dd185d0af43518304312bee352ccc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/282590085b33441488219dd41bf7ae3e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0462f69833d04b3c997e347790afd3e4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a754369f00d9442f8d54fcd3cd3f8048, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/83c86dd195f647529613a4d5257d72a8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e19d4115a50c4529b9c1c0717d1035d3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/01bc0509498040b2bffd3dc70331adf6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/11805f4864d6411da514daa9c9237e77, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/92804db83a6545dc990ffe7fa712d61e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cede96dd21374119a919eb085718da8c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6d2e2acd6fea4d79960adcc07ca6470c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/ef26caac81f34faba465219e45e53c81, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d3e5a83f97a04d48965f41489e53ca04, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/652983fa6d44478087aca86c051ee9a3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a1d9322eeb0b4f0486c73cd7f2b9acf7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/dee8e70e8ec241249282c9c7e3e5d18b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/81cd4a49a7f649b1b2f7ea0f28d6d040, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/66fcb805c0844050a73642d897c931af] to archive 2024-12-12T19:34:42,188 DEBUG [StoreCloser-TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:34:42,200 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f992632eca724b32bda8ee77d2e2eac3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f992632eca724b32bda8ee77d2e2eac3 2024-12-12T19:34:42,200 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/2663977b051a4c20ad280e9bda849502 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/2663977b051a4c20ad280e9bda849502 2024-12-12T19:34:42,200 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/19de58ffafe044d0a23b6b6b4193ccb3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/19de58ffafe044d0a23b6b6b4193ccb3 2024-12-12T19:34:42,202 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/026ed9a79eea4eafa81d650d1c90c21f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/026ed9a79eea4eafa81d650d1c90c21f 2024-12-12T19:34:42,202 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/081e8f7076cd49ea969083d7663b99fb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/081e8f7076cd49ea969083d7663b99fb 
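The region close above (UnassignRegionHandler, pid=139) is the server-side effect of the DisableTableProcedure chain stored as pids 136 through 139 once the client started the disable at 19:34:41,987. As a hedged sketch of the client side only, the snippet below does with the public Admin API what HBaseAdmin does in this log; disableTable blocks until the master reports the procedure finished, which is the repeated "Checking to see if procedure is done pid=136" polling visible above. Cluster configuration is again assumed to come from the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (!admin.isTableDisabled(table)) {
                // Blocks while the master runs DisableTableProcedure and its
                // children (close table regions, unassign, close region).
                admin.disableTable(table);
            }
            System.out.println("disabled: " + admin.isTableDisabled(table));
        }
    }
}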
2024-12-12T19:34:42,202 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cb801cf7329f4663a2df75d31c49d3a6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cb801cf7329f4663a2df75d31c49d3a6 2024-12-12T19:34:42,203 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b9dfb625ff284093baf37de4ca332601 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b9dfb625ff284093baf37de4ca332601 2024-12-12T19:34:42,204 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1ae883b5a26749338f6e4e3cfc155eeb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1ae883b5a26749338f6e4e3cfc155eeb 2024-12-12T19:34:42,205 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1e74f191329949e7a1602a6b14e2e6c9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/1e74f191329949e7a1602a6b14e2e6c9 2024-12-12T19:34:42,205 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6efcc3e5e3f945ac8b518b6bd0ad103c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6efcc3e5e3f945ac8b518b6bd0ad103c 2024-12-12T19:34:42,205 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/af4657ac7bf742059c9d2660b52dd821 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/af4657ac7bf742059c9d2660b52dd821 2024-12-12T19:34:42,205 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e0dd1a1e7d75411e81a9b2ac36506aef to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e0dd1a1e7d75411e81a9b2ac36506aef 2024-12-12T19:34:42,205 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b077b0b574aa4ad494f83d5239ad71c9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/b077b0b574aa4ad494f83d5239ad71c9 2024-12-12T19:34:42,206 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/fb934219e8c9490e9449d2dbff70f42a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/fb934219e8c9490e9449d2dbff70f42a 2024-12-12T19:34:42,207 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d4bd14172f9b4e70a98a036e8c725799 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d4bd14172f9b4e70a98a036e8c725799 2024-12-12T19:34:42,208 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/282590085b33441488219dd41bf7ae3e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/282590085b33441488219dd41bf7ae3e 2024-12-12T19:34:42,208 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f03dd185d0af43518304312bee352ccc to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/f03dd185d0af43518304312bee352ccc 2024-12-12T19:34:42,208 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a085521d8c0c4f48a12e6f5c4b6d8cbe to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a085521d8c0c4f48a12e6f5c4b6d8cbe 2024-12-12T19:34:42,208 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/83c86dd195f647529613a4d5257d72a8 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/83c86dd195f647529613a4d5257d72a8 2024-12-12T19:34:42,208 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0462f69833d04b3c997e347790afd3e4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0462f69833d04b3c997e347790afd3e4 2024-12-12T19:34:42,209 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a754369f00d9442f8d54fcd3cd3f8048 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a754369f00d9442f8d54fcd3cd3f8048 2024-12-12T19:34:42,209 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e19d4115a50c4529b9c1c0717d1035d3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/e19d4115a50c4529b9c1c0717d1035d3 2024-12-12T19:34:42,210 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/11805f4864d6411da514daa9c9237e77 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/11805f4864d6411da514daa9c9237e77 2024-12-12T19:34:42,210 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/92804db83a6545dc990ffe7fa712d61e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/92804db83a6545dc990ffe7fa712d61e 2024-12-12T19:34:42,211 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6d2e2acd6fea4d79960adcc07ca6470c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/6d2e2acd6fea4d79960adcc07ca6470c 2024-12-12T19:34:42,211 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cede96dd21374119a919eb085718da8c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/cede96dd21374119a919eb085718da8c 2024-12-12T19:34:42,212 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d3e5a83f97a04d48965f41489e53ca04 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/d3e5a83f97a04d48965f41489e53ca04 2024-12-12T19:34:42,212 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/ef26caac81f34faba465219e45e53c81 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/ef26caac81f34faba465219e45e53c81 2024-12-12T19:34:42,212 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/652983fa6d44478087aca86c051ee9a3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/652983fa6d44478087aca86c051ee9a3 2024-12-12T19:34:42,212 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a1d9322eeb0b4f0486c73cd7f2b9acf7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/a1d9322eeb0b4f0486c73cd7f2b9acf7 2024-12-12T19:34:42,213 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/dee8e70e8ec241249282c9c7e3e5d18b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/dee8e70e8ec241249282c9c7e3e5d18b 2024-12-12T19:34:42,213 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/81cd4a49a7f649b1b2f7ea0f28d6d040 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/81cd4a49a7f649b1b2f7ea0f28d6d040 2024-12-12T19:34:42,213 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/66fcb805c0844050a73642d897c931af to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/66fcb805c0844050a73642d897c931af 2024-12-12T19:34:42,215 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/01bc0509498040b2bffd3dc70331adf6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/01bc0509498040b2bffd3dc70331adf6 2024-12-12T19:34:42,227 DEBUG [StoreCloser-TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4f03567fd4a6466dbf5ea80cac36d50f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4e7c43e92bdf4d01881aa1dadf8b05df, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/bd089b1a4712483988a32d78aa027fec, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/844fc1cbd23e48db881a3dfbe893cbc0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/8835f2cc125548b3b43f285a84e8cbbb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/551e09800bdf4ff8921802a8ef5a0178, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/5d08df2b30104da485120b85ff79cb15, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/27bcbd9d5291485190beb7078cae107d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7019781fe8bd455da02171b7c8b5e0d4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/89873d353f604e41954fd470e17ca0f2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dbe777b6b2d74593bd378599575d365a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/02a5829e5d584a099361512c1ddbef68, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/9803cd9ffb95401aa9b12a3be9d472a2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e72980a6bf694cd2bbecd3bb02e4d34a, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/d738a50d84924c00bda478b7eb19ef28, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/db3accb370724dfa97d5b624059eabde, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/084d0326b1af47c38bf6b8763de1eb3c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/653a83a7a92a48e0b829dd39d4b52e0a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/318abf853ea44d83a9c54ac7bd66f845, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/19eb79fbd438420697202b0b6e8e06ab, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fe239fd00c5a473f815d5f8041480e10, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fcbef3e3fd414666b5c687712b73b536, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/ccd8f040d33344fe8924fe4625f42bb5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/6bed93737cf043ed8146333712061387, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/caa7d363d323424fab07fdd683d57e9a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7d589798aad24cea94941a0c23f17bd9, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c61c2baad9a8449a90560d9b4fe5bbe5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e2e4ee2d413e4485a5d39b6095bb4bbb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dc189028329647ada69c73e833203341, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/753b3d936a64493ca25931ca4f7827fa, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c4ebf62a1cc44613a008a14ef5c66ce6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f9bec7da9a7544c4afdeb590e9a0d9b9] to archive 2024-12-12T19:34:42,228 DEBUG [StoreCloser-TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
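Every "Archived from FileableStoreFile" line above follows the same pattern: a store file under <root>/data/default/TestAcidGuarantees/<region>/<family>/ is moved to the identical relative path under <root>/archive/. The helper below is hypothetical (it is not HBase's HFileArchiver API); it is only a sketch that reproduces the path rewrite visible in these log lines, using the HDFS root directory of this run and one of the family B files listed above.

import org.apache.hadoop.fs.Path;

// Hypothetical helper, not part of HBase: it only mirrors the rewrite shown by
// the HFileArchiver log lines (.../data/default/... -> .../archive/data/default/...).
public final class ArchivePathSketch {
    static Path toArchivePath(Path storeFile, Path rootDir) {
        // Relative part of the store file under the HBase root directory,
        // e.g. data/default/TestAcidGuarantees/<region>/<family>/<hfile>.
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989");
        // Prints the same archive location the HFileArchiver-* threads report.
        System.out.println(toArchivePath(storeFile, root));
    }
}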
2024-12-12T19:34:42,233 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4e7c43e92bdf4d01881aa1dadf8b05df to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4e7c43e92bdf4d01881aa1dadf8b05df 2024-12-12T19:34:42,233 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f1c2a04fed9b41d2a2875db872a2f989 2024-12-12T19:34:42,234 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/1c4cfd8ec5f24ec6aa7e9cd0974dbc62 2024-12-12T19:34:42,234 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/844fc1cbd23e48db881a3dfbe893cbc0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/844fc1cbd23e48db881a3dfbe893cbc0 2024-12-12T19:34:42,234 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/bd089b1a4712483988a32d78aa027fec to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/bd089b1a4712483988a32d78aa027fec 2024-12-12T19:34:42,235 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4f03567fd4a6466dbf5ea80cac36d50f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/4f03567fd4a6466dbf5ea80cac36d50f 2024-12-12T19:34:42,235 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/551e09800bdf4ff8921802a8ef5a0178 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/551e09800bdf4ff8921802a8ef5a0178 2024-12-12T19:34:42,239 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/27bcbd9d5291485190beb7078cae107d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/27bcbd9d5291485190beb7078cae107d 2024-12-12T19:34:42,239 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/5d08df2b30104da485120b85ff79cb15 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/5d08df2b30104da485120b85ff79cb15 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/8835f2cc125548b3b43f285a84e8cbbb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/8835f2cc125548b3b43f285a84e8cbbb 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7019781fe8bd455da02171b7c8b5e0d4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7019781fe8bd455da02171b7c8b5e0d4 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dbe777b6b2d74593bd378599575d365a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dbe777b6b2d74593bd378599575d365a 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/02a5829e5d584a099361512c1ddbef68 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/02a5829e5d584a099361512c1ddbef68 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/d738a50d84924c00bda478b7eb19ef28 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/d738a50d84924c00bda478b7eb19ef28 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/89873d353f604e41954fd470e17ca0f2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/89873d353f604e41954fd470e17ca0f2 2024-12-12T19:34:42,242 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/9803cd9ffb95401aa9b12a3be9d472a2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/9803cd9ffb95401aa9b12a3be9d472a2 2024-12-12T19:34:42,243 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e72980a6bf694cd2bbecd3bb02e4d34a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e72980a6bf694cd2bbecd3bb02e4d34a 2024-12-12T19:34:42,243 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/653a83a7a92a48e0b829dd39d4b52e0a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/653a83a7a92a48e0b829dd39d4b52e0a 2024-12-12T19:34:42,243 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/db3accb370724dfa97d5b624059eabde to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/db3accb370724dfa97d5b624059eabde 2024-12-12T19:34:42,244 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fe239fd00c5a473f815d5f8041480e10 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fe239fd00c5a473f815d5f8041480e10 2024-12-12T19:34:42,244 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/084d0326b1af47c38bf6b8763de1eb3c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/084d0326b1af47c38bf6b8763de1eb3c 2024-12-12T19:34:42,244 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/318abf853ea44d83a9c54ac7bd66f845 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/318abf853ea44d83a9c54ac7bd66f845 2024-12-12T19:34:42,244 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/19eb79fbd438420697202b0b6e8e06ab to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/19eb79fbd438420697202b0b6e8e06ab 2024-12-12T19:34:42,244 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fcbef3e3fd414666b5c687712b73b536 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/fcbef3e3fd414666b5c687712b73b536 2024-12-12T19:34:42,244 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/ccd8f040d33344fe8924fe4625f42bb5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/ccd8f040d33344fe8924fe4625f42bb5 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/6bed93737cf043ed8146333712061387 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/6bed93737cf043ed8146333712061387 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/caa7d363d323424fab07fdd683d57e9a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/caa7d363d323424fab07fdd683d57e9a 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c61c2baad9a8449a90560d9b4fe5bbe5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c61c2baad9a8449a90560d9b4fe5bbe5 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e2e4ee2d413e4485a5d39b6095bb4bbb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/e2e4ee2d413e4485a5d39b6095bb4bbb 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7d589798aad24cea94941a0c23f17bd9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/7d589798aad24cea94941a0c23f17bd9 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dc189028329647ada69c73e833203341 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/dc189028329647ada69c73e833203341 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c4ebf62a1cc44613a008a14ef5c66ce6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/c4ebf62a1cc44613a008a14ef5c66ce6 2024-12-12T19:34:42,245 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/753b3d936a64493ca25931ca4f7827fa to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/753b3d936a64493ca25931ca4f7827fa 2024-12-12T19:34:42,246 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f9bec7da9a7544c4afdeb590e9a0d9b9 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/f9bec7da9a7544c4afdeb590e9a0d9b9 2024-12-12T19:34:42,251 DEBUG [StoreCloser-TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/540b897376174d4e803f497e6f8b7958, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/db054f357131468a8969d5f5223ca71a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7e24e2cc0eb74e39872a954a0c91e445, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/83c3c252b75147c5b2e58252fefa1fa0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/175ec001c1574aaa9f2954ab73f51b7d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4cf14239d40244eea6d639e7ef76d8db, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d35abc8bf239434b982f69c21183b901, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6359a938739e46d9b2d56f97d5cc8b05, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b2b613887ce443e591ae5fe666a80612, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/674d0d58d014477ea24e909391c7a4b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/efda3d6e6b8e4f1e99e4ee914187b1c1, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8e4532ed076a4adbb227696994951f50, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/9e47dbe2257f41adbe7c9465e42557da, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/a150f499d5664e389ad5a66fca36d853, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/53f53c0fc6504f7694cb82a236bf507e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7d38814215974d39b3c5ad56b8028c0c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0090ecc4fc8c44a184b860abd3ef6d10, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/709c765769ed4d949b7dc2b77c3cfde4, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0d794d035bc64570a6d4803d8594f797, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/124f329c2ac64284923c820ce89378de, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/5ff7eae19850404c9959e02f11c84fc0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/e952082fb1604ef9ab9120afe91b5016, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/63b27405146d47d89656f41f5e3f9a83, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/918ed26a8bbb48dea6e47ec00ab2ed29, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/39ad26a510424435a2d497674a0fb25f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4e602fdf11854336b4fd6d0d6fb9bc95, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/ef6f5f20080a46a7948ecc244b955a79, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6c613e5fe09a4c42868b6ff55903626d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/c4c364207d614c65855b9115ef177f03, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b7f9e8d0f2524b2a9c34af6471a31dd2, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/fe90d85a60aa4d74b6dc7dbf326c62b0, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d74636c366674ce1b0e627ef7b4439a6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/06f406c40fa74a9ba55bd5d236a18a94, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0a3c4749a09d4fd2a50ab213a8f1894c] to archive 2024-12-12T19:34:42,252 DEBUG [StoreCloser-TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T19:34:42,255 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4cf14239d40244eea6d639e7ef76d8db to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4cf14239d40244eea6d639e7ef76d8db 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/83c3c252b75147c5b2e58252fefa1fa0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/83c3c252b75147c5b2e58252fefa1fa0 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/db054f357131468a8969d5f5223ca71a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/db054f357131468a8969d5f5223ca71a 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6359a938739e46d9b2d56f97d5cc8b05 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6359a938739e46d9b2d56f97d5cc8b05 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/540b897376174d4e803f497e6f8b7958 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/540b897376174d4e803f497e6f8b7958 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d35abc8bf239434b982f69c21183b901 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d35abc8bf239434b982f69c21183b901 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7e24e2cc0eb74e39872a954a0c91e445 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7e24e2cc0eb74e39872a954a0c91e445 2024-12-12T19:34:42,255 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/175ec001c1574aaa9f2954ab73f51b7d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/175ec001c1574aaa9f2954ab73f51b7d 2024-12-12T19:34:42,256 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/53f53c0fc6504f7694cb82a236bf507e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/53f53c0fc6504f7694cb82a236bf507e 2024-12-12T19:34:42,257 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8e4532ed076a4adbb227696994951f50 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8e4532ed076a4adbb227696994951f50 2024-12-12T19:34:42,257 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/efda3d6e6b8e4f1e99e4ee914187b1c1 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/efda3d6e6b8e4f1e99e4ee914187b1c1 2024-12-12T19:34:42,257 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/9e47dbe2257f41adbe7c9465e42557da to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/9e47dbe2257f41adbe7c9465e42557da 2024-12-12T19:34:42,257 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b2b613887ce443e591ae5fe666a80612 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b2b613887ce443e591ae5fe666a80612 2024-12-12T19:34:42,257 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7d38814215974d39b3c5ad56b8028c0c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/7d38814215974d39b3c5ad56b8028c0c 2024-12-12T19:34:42,258 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0090ecc4fc8c44a184b860abd3ef6d10 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0090ecc4fc8c44a184b860abd3ef6d10 2024-12-12T19:34:42,259 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/709c765769ed4d949b7dc2b77c3cfde4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/709c765769ed4d949b7dc2b77c3cfde4 2024-12-12T19:34:42,259 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0d794d035bc64570a6d4803d8594f797 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0d794d035bc64570a6d4803d8594f797 2024-12-12T19:34:42,259 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/5ff7eae19850404c9959e02f11c84fc0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/5ff7eae19850404c9959e02f11c84fc0 2024-12-12T19:34:42,259 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/674d0d58d014477ea24e909391c7a4b0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/674d0d58d014477ea24e909391c7a4b0 2024-12-12T19:34:42,259 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/e952082fb1604ef9ab9120afe91b5016 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/e952082fb1604ef9ab9120afe91b5016 2024-12-12T19:34:42,259 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/124f329c2ac64284923c820ce89378de to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/124f329c2ac64284923c820ce89378de 2024-12-12T19:34:42,260 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/39ad26a510424435a2d497674a0fb25f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/39ad26a510424435a2d497674a0fb25f 2024-12-12T19:34:42,261 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/63b27405146d47d89656f41f5e3f9a83 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/63b27405146d47d89656f41f5e3f9a83 2024-12-12T19:34:42,261 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/a150f499d5664e389ad5a66fca36d853 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/a150f499d5664e389ad5a66fca36d853 2024-12-12T19:34:42,261 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/ef6f5f20080a46a7948ecc244b955a79 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/ef6f5f20080a46a7948ecc244b955a79 2024-12-12T19:34:42,261 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4e602fdf11854336b4fd6d0d6fb9bc95 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/4e602fdf11854336b4fd6d0d6fb9bc95 2024-12-12T19:34:42,261 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/918ed26a8bbb48dea6e47ec00ab2ed29 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/918ed26a8bbb48dea6e47ec00ab2ed29 2024-12-12T19:34:42,261 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6c613e5fe09a4c42868b6ff55903626d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/6c613e5fe09a4c42868b6ff55903626d 2024-12-12T19:34:42,262 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b7f9e8d0f2524b2a9c34af6471a31dd2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/b7f9e8d0f2524b2a9c34af6471a31dd2 2024-12-12T19:34:42,262 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/c4c364207d614c65855b9115ef177f03 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/c4c364207d614c65855b9115ef177f03 2024-12-12T19:34:42,263 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d74636c366674ce1b0e627ef7b4439a6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/d74636c366674ce1b0e627ef7b4439a6 2024-12-12T19:34:42,265 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/06f406c40fa74a9ba55bd5d236a18a94 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/06f406c40fa74a9ba55bd5d236a18a94 2024-12-12T19:34:42,265 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/fe90d85a60aa4d74b6dc7dbf326c62b0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/fe90d85a60aa4d74b6dc7dbf326c62b0 2024-12-12T19:34:42,265 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0a3c4749a09d4fd2a50ab213a8f1894c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/0a3c4749a09d4fd2a50ab213a8f1894c 2024-12-12T19:34:42,268 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/recovered.edits/506.seqid, newMaxSeqId=506, maxSeqId=1 2024-12-12T19:34:42,271 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c. 
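Every backup.HFileArchiver(620) entry above follows the same pattern: a compacted store file under data/default/TestAcidGuarantees/&lt;region&gt;/&lt;family&gt;/ is moved to the identical relative location under archive/. A minimal sketch of that path mapping, assuming the Hadoop Path API; the helper name is hypothetical and this is only an illustration of the layout, not the actual backup.HFileArchiver implementation:

import org.apache.hadoop.fs.Path;

// Illustrative only: maps <root>/data/default/<table>/<region>/<cf>/<hfile>
// to <root>/archive/data/default/<table>/<region>/<cf>/<hfile>, mirroring the log above.
static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
}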
2024-12-12T19:34:42,271 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1635): Region close journal for d2250407ef6b64ed659c4133a7c4d89c: 2024-12-12T19:34:42,272 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(170): Closed d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:42,275 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=d2250407ef6b64ed659c4133a7c4d89c, regionState=CLOSED 2024-12-12T19:34:42,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-12T19:34:42,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; CloseRegionProcedure d2250407ef6b64ed659c4133a7c4d89c, server=4c9c438b6eeb,42689,1734031923038 in 246 msec 2024-12-12T19:34:42,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=137 2024-12-12T19:34:42,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=137, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2250407ef6b64ed659c4133a7c4d89c, UNASSIGN in 250 msec 2024-12-12T19:34:42,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-12T19:34:42,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 258 msec 2024-12-12T19:34:42,286 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032082286"}]},"ts":"1734032082286"} 2024-12-12T19:34:42,292 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T19:34:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T19:34:42,318 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T19:34:42,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 331 msec 2024-12-12T19:34:42,558 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T19:34:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T19:34:42,608 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-12T19:34:42,609 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T19:34:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,610 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=140, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T19:34:42,611 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=140, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,619 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:42,636 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/recovered.edits] 2024-12-12T19:34:42,639 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0a51c2b3410a4bad8c328aa068232d91 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/0a51c2b3410a4bad8c328aa068232d91 2024-12-12T19:34:42,640 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/9f6774bc282a492d8cc52f799e304d06 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/A/9f6774bc282a492d8cc52f799e304d06 2024-12-12T19:34:42,656 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/09859cb689d44ae390db1234a13e325c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/09859cb689d44ae390db1234a13e325c 2024-12-12T19:34:42,657 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/3dcb94aba613497a839d2d95902fd4bb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/B/3dcb94aba613497a839d2d95902fd4bb 2024-12-12T19:34:42,659 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/458dd0b1103343f6b44b9d128e9a0ae4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/458dd0b1103343f6b44b9d128e9a0ae4 2024-12-12T19:34:42,660 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8608a0f841444cb08bf7307a551174e0 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/C/8608a0f841444cb08bf7307a551174e0 2024-12-12T19:34:42,662 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/recovered.edits/506.seqid to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c/recovered.edits/506.seqid 2024-12-12T19:34:42,663 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/d2250407ef6b64ed659c4133a7c4d89c 2024-12-12T19:34:42,663 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T19:34:42,676 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=140, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,695 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T19:34:42,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T19:34:42,724 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T19:34:42,739 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=140, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,740 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T19:34:42,740 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734032082740"}]},"ts":"9223372036854775807"} 2024-12-12T19:34:42,752 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T19:34:42,752 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d2250407ef6b64ed659c4133a7c4d89c, NAME => 'TestAcidGuarantees,,1734032059631.d2250407ef6b64ed659c4133a7c4d89c.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T19:34:42,752 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-12T19:34:42,752 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734032082752"}]},"ts":"9223372036854775807"} 2024-12-12T19:34:42,764 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T19:34:42,803 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=140, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 194 msec 2024-12-12T19:34:42,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T19:34:42,928 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-12T19:34:42,935 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 246), OpenFileDescriptor=453 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1501 (was 1476) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 11), AvailableMemoryMB=9763 (was 8038) - AvailableMemoryMB LEAK? - 2024-12-12T19:34:42,944 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=1501, ProcessCount=9, AvailableMemoryMB=9762 2024-12-12T19:34:42,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
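The DISABLE (procId 136) and DELETE (procId 140) operations that complete above are what a client drives through the standard Admin API. A minimal sketch, assuming a reachable cluster on the default configuration; illustrative, not taken from the test source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.disableTable(table); // master runs a DisableTableProcedure (cf. pid=136 above)
            admin.deleteTable(table);  // master runs a DeleteTableProcedure (cf. pid=140 above)
        }
    }
}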
2024-12-12T19:34:42,945 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:34:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:42,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T19:34:42,947 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:42,947 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 141 2024-12-12T19:34:42,948 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T19:34:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T19:34:42,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742357_1533 (size=963) 2024-12-12T19:34:43,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T19:34:43,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T19:34:43,363 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98 2024-12-12T19:34:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742358_1534 (size=53) 2024-12-12T19:34:43,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T19:34:43,781 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:34:43,781 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9e48e6f12fe9b1293921e76b13fdbb7f, disabling compactions & flushes 2024-12-12T19:34:43,781 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:43,781 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:43,781 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. after waiting 0 ms 2024-12-12T19:34:43,781 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:43,781 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
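The create request (procId 141) whose descriptor is printed above sets the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and defines three column families A/B/C with VERSIONS => '1' and a 64 KB block size. A minimal sketch of building an equivalent descriptor with the public client API, assuming an Admin handle named admin as in the earlier sketch; illustrative, not the test's own helper code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

TableDescriptorBuilder builder = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
    // table-level metadata shown in the log: ADAPTIVE in-memory compaction for the memstores
    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
for (String family : new String[] {"A", "B", "C"}) {
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes(family))
        .setMaxVersions(1)    // VERSIONS => '1'
        .setBlocksize(65536)  // BLOCKSIZE => '65536'
        .build());
}
admin.createTable(builder.build()); // stores a CreateTableProcedure on the master (cf. pid=141)

The ADAPTIVE attribute is what later surfaces in the StoreOpener lines as CompactingMemStore with compactor=ADAPTIVE for each of the three stores.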
2024-12-12T19:34:43,781 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:43,782 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T19:34:43,783 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734032083782"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734032083782"}]},"ts":"1734032083782"} 2024-12-12T19:34:43,784 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T19:34:43,785 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T19:34:43,785 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032083785"}]},"ts":"1734032083785"} 2024-12-12T19:34:43,786 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T19:34:43,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, ASSIGN}] 2024-12-12T19:34:43,815 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, ASSIGN 2024-12-12T19:34:43,819 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, ASSIGN; state=OFFLINE, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=false 2024-12-12T19:34:43,970 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:43,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; OpenRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:34:44,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T19:34:44,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:44,126 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:44,126 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7285): Opening region: {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:34:44,127 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,127 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:34:44,127 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7327): checking encryption for 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,127 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7330): checking classloading for 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,128 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,130 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:44,130 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e48e6f12fe9b1293921e76b13fdbb7f columnFamilyName A 2024-12-12T19:34:44,130 DEBUG [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:44,131 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(327): Store=9e48e6f12fe9b1293921e76b13fdbb7f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:44,131 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,132 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:44,133 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e48e6f12fe9b1293921e76b13fdbb7f columnFamilyName B 2024-12-12T19:34:44,133 DEBUG [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:44,133 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(327): Store=9e48e6f12fe9b1293921e76b13fdbb7f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:44,133 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,135 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:44,135 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e48e6f12fe9b1293921e76b13fdbb7f columnFamilyName C 2024-12-12T19:34:44,135 DEBUG [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:44,136 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(327): Store=9e48e6f12fe9b1293921e76b13fdbb7f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:44,136 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:44,137 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,137 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,139 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:34:44,140 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1085): writing seq id for 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:44,142 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T19:34:44,143 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1102): Opened 9e48e6f12fe9b1293921e76b13fdbb7f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58749457, jitterRate=-0.12456487119197845}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:34:44,144 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1001): Region open journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:44,145 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., pid=143, masterSystemTime=1734032084123 2024-12-12T19:34:44,146 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:44,146 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:44,147 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=OPEN, openSeqNum=2, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:44,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-12T19:34:44,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; OpenRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 in 177 msec 2024-12-12T19:34:44,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-12T19:34:44,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, ASSIGN in 339 msec 2024-12-12T19:34:44,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T19:34:44,153 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032084152"}]},"ts":"1734032084152"} 2024-12-12T19:34:44,154 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T19:34:44,194 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T19:34:44,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2490 sec 2024-12-12T19:34:45,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T19:34:45,053 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 141 completed 2024-12-12T19:34:45,054 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-12-12T19:34:45,100 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:45,102 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:45,104 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:45,105 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T19:34:45,108 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56144, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T19:34:45,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T19:34:45,110 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T19:34:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T19:34:45,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742359_1535 (size=999) 2024-12-12T19:34:45,557 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T19:34:45,557 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T19:34:45,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:34:45,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, REOPEN/MOVE}] 2024-12-12T19:34:45,563 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, REOPEN/MOVE 2024-12-12T19:34:45,564 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:45,565 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:34:45,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:34:45,716 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:45,717 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:45,717 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:34:45,717 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 9e48e6f12fe9b1293921e76b13fdbb7f, disabling compactions & flushes 2024-12-12T19:34:45,717 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:45,717 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:45,717 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. after waiting 0 ms 2024-12-12T19:34:45,717 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
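
Not part of the captured log: the ModifyTableProcedure above (pid=144) rewrites the TestAcidGuarantees descriptor so that column family 'A' becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and its ReopenTableRegionsProcedure / TransitRegionStateProcedure children (pids 145-148) then close and reopen the region so the new descriptor takes effect. For reference only, a minimal sketch of how the same change could be issued through the HBase 2.x Admin API; the class name and connection setup are illustrative assumptions, not how the test itself drives the change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: enable MOB on family 'A' of TestAcidGuarantees,
// mirroring the before/after descriptors printed by HMaster above.
public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(tn);
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
              .build())
          .build();
      // Submits a ModifyTableProcedure; the master then reopens the
      // table's regions, which is what pids 144-148 above correspond to.
      admin.modifyTable(modified);
    }
  }
}
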
2024-12-12T19:34:45,721 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T19:34:45,722 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:45,722 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:45,722 WARN [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionServer(3786): Not adding moved region record: 9e48e6f12fe9b1293921e76b13fdbb7f to self. 2024-12-12T19:34:45,724 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:45,724 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=CLOSED 2024-12-12T19:34:45,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-12T19:34:45,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 in 160 msec 2024-12-12T19:34:45,728 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, REOPEN/MOVE; state=CLOSED, location=4c9c438b6eeb,42689,1734031923038; forceNewPlan=false, retain=true 2024-12-12T19:34:45,878 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=OPENING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:45,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=146, state=RUNNABLE; OpenRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:34:46,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,033 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:46,034 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7285): Opening region: {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} 2024-12-12T19:34:46,034 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,034 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T19:34:46,034 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7327): checking encryption for 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,034 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7330): checking classloading for 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,035 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,035 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:46,036 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e48e6f12fe9b1293921e76b13fdbb7f columnFamilyName A 2024-12-12T19:34:46,037 DEBUG [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:46,037 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(327): Store=9e48e6f12fe9b1293921e76b13fdbb7f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:46,037 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,038 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:46,038 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e48e6f12fe9b1293921e76b13fdbb7f columnFamilyName B 2024-12-12T19:34:46,038 DEBUG [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:46,038 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(327): Store=9e48e6f12fe9b1293921e76b13fdbb7f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:46,038 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,039 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T19:34:46,039 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e48e6f12fe9b1293921e76b13fdbb7f columnFamilyName C 2024-12-12T19:34:46,039 DEBUG [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:46,039 INFO [StoreOpener-9e48e6f12fe9b1293921e76b13fdbb7f-1 {}] regionserver.HStore(327): Store=9e48e6f12fe9b1293921e76b13fdbb7f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T19:34:46,039 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,040 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,040 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,041 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T19:34:46,043 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1085): writing seq id for 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,043 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1102): Opened 9e48e6f12fe9b1293921e76b13fdbb7f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62685452, jitterRate=-0.06591397523880005}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T19:34:46,044 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1001): Region open journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:46,045 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., pid=148, masterSystemTime=1734032086031 2024-12-12T19:34:46,046 DEBUG [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,046 INFO [RS_OPEN_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
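
Not part of the captured log: the store-opener lines above show families A, B and C each coming up with a CompactingMemStore (compactor=ADAPTIVE, in-memory flush threshold 2.00 MB, pipelineThreshold=2), which follows from the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' carried in the descriptor. A minimal sketch of a descriptor that selects the same policy is given below; the class and method usage are illustrative assumptions (the per-family alternative would be ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: a table descriptor whose metadata selects the
// ADAPTIVE CompactingMemStore for every family, as logged above.
public class AdaptiveMemstoreDescriptor {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("B")))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("C")))
        .build();
  }
}
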
2024-12-12T19:34:46,046 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=OPEN, openSeqNum=5, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=146 2024-12-12T19:34:46,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=146, state=SUCCESS; OpenRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 in 167 msec 2024-12-12T19:34:46,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-12T19:34:46,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, REOPEN/MOVE in 486 msec 2024-12-12T19:34:46,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-12T19:34:46,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 491 msec 2024-12-12T19:34:46,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 940 msec 2024-12-12T19:34:46,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T19:34:46,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x091d72db to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58971172 2024-12-12T19:34:46,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e757135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-12-12T19:34:46,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,161 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-12-12T19:34:46,183 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,184 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x6bb6288a to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-12-12T19:34:46,194 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,195 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-12-12T19:34:46,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,212 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d832d43 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1d3a95 2024-12-12T19:34:46,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50bf224f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,234 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15b6349f to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503a7d2e 2024-12-12T19:34:46,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79be903c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x439b60d5 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@404bb685 2024-12-12T19:34:46,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d79f1c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,272 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f48b1c2 to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42aacb30 2024-12-12T19:34:46,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40dfd554, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,278 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7287c75d to 127.0.0.1:52216 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e06176 2024-12-12T19:34:46,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@582b6d8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T19:34:46,290 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees 2024-12-12T19:34:46,291 DEBUG [hconnection-0x4e0c05b3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,291 DEBUG [hconnection-0x5c4c1218-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,292 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,292 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:46,292 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:46,294 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:46,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:46,295 DEBUG [hconnection-0x63f0d024-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,297 DEBUG [hconnection-0x7d003b6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,297 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,297 DEBUG [hconnection-0x831e58f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,297 DEBUG [hconnection-0x3cd03c83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,298 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49074, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,298 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,299 DEBUG [hconnection-0xc914ba6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,299 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,301 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,301 DEBUG [hconnection-0x5f70fac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,302 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,303 DEBUG [hconnection-0x7dcee5e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,303 DEBUG [hconnection-0x730d1781-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T19:34:46,304 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,305 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T19:34:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T19:34:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032146326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032146327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032146327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032146330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212eb2c2ec80cbf43b8b55baf070f40302e_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032086304/Put/seqid=0 2024-12-12T19:34:46,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742360_1536 (size=12154) 2024-12-12T19:34:46,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032146334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:46,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032146431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032146431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032146431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032146436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032146437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,449 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:46,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:46,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
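
Not part of the captured log: the repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server applying write backpressure while the requested flush (pids 149/150) is still in progress, so the test's writers see their Mutate calls rejected until the memstore drains. A minimal client-side retry sketch follows; it assumes the exception surfaces directly to the caller (with default client retry settings it may instead arrive wrapped after the client's own retries), and the helper name, attempt count and backoff are arbitrary illustrative choices, not what TestAcidGuarantees itself does.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Illustrative only: retry a put that the region rejects with
// RegionTooBusyException because its memstore is over the blocking limit.
public final class BackpressureAwarePut {
  static void putWithRetry(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after maxAttempts rejections
        }
        // Back off and let the in-flight flush drain the memstore.
        Thread.sleep(100L * attempt);
      }
    }
  }
}
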
2024-12-12T19:34:46,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:46,603 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:46,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:46,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032146634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032146641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032146641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032146647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032146647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,739 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:46,745 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212eb2c2ec80cbf43b8b55baf070f40302e_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212eb2c2ec80cbf43b8b55baf070f40302e_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:46,747 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/dee303a8daee426a9e47cb1f4171ea9c, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:46,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/dee303a8daee426a9e47cb1f4171ea9c is 175, key is test_row_0/A:col10/1734032086304/Put/seqid=0 2024-12-12T19:34:46,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:46,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
as already flushing 2024-12-12T19:34:46,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742361_1537 (size=30955) 2024-12-12T19:34:46,788 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/dee303a8daee426a9e47cb1f4171ea9c 2024-12-12T19:34:46,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/cedab7db78d240bba776607200be2f21 is 50, key is test_row_0/B:col10/1734032086304/Put/seqid=0 2024-12-12T19:34:46,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742362_1538 (size=12001) 2024-12-12T19:34:46,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:46,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:46,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:46,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:46,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
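[annotation] The RegionTooBusyException records above show the region server refusing new Mutate calls because the memstore of region 9e48e6f12fe9b1293921e76b13fdbb7f has reached its blocking size (reported as 512.0 K) while the in-progress flush drains it. In stock HBase that blocking size is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The sketch below only illustrates that relationship with hypothetical values (128 K x 4) chosen to match the 512 K limit seen here; it is not the actual TestAcidGuarantees configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values picked to reproduce the 512 K blocking limit in this log;
    // the real test configuration may differ.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking multiplier
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes are rejected with RegionTooBusyException once the region memstore exceeds this size.
    System.out.println("Blocking memstore limit: " + blocking + " bytes"); // 524288 = 512.0 K
  }
}
```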
2024-12-12T19:34:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:46,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032146942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032146956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032146961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032146961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:46,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032146963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:47,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:47,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:47,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:47,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/cedab7db78d240bba776607200be2f21 2024-12-12T19:34:47,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/065ae5e8b62142af999bc03c1d35b7f5 is 50, key is test_row_0/C:col10/1734032086304/Put/seqid=0 2024-12-12T19:34:47,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742363_1539 (size=12001) 2024-12-12T19:34:47,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/065ae5e8b62142af999bc03c1d35b7f5 2024-12-12T19:34:47,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:47,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:47,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:47,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:47,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
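[annotation] The pid=150 loop above (Executing remote procedure, then NOT flushing ... as already flushing, then Remote procedure failed) is the master repeatedly re-dispatching a FlushRegionCallable while the region server's own MemStoreFlusher is still writing the same region; the master keeps polling pid=149 until a dispatch finally succeeds. A flush like this can also be requested from client code through the Admin API. The sketch below is a minimal, hypothetical example against the table named in this log, not the code the test itself runs.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; if a region is already flushing
      // (as with pid=150 above), the server-side procedure is retried until it can run.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```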
2024-12-12T19:34:47,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
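[annotation] The MemStoreFlusher records that follow trace the normal flush commit path: each column family is first written to a temporary HFile under .tmp/<family>/, then renamed into the family directory and registered with the store ("Added ..., entries=150, sequenceid=16"). The committed files can be inspected directly on HDFS; the sketch below is a hypothetical listing of the A family directory using the path from this log, assuming the mini-cluster namenode at localhost:38311 is still reachable.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListFlushedStoreFiles {
  public static void main(String[] args) throws Exception {
    // Family directory taken from the log above; adjust to your own cluster layout.
    Path familyDir = new Path("hdfs://localhost:38311/user/jenkins/test-data/"
        + "482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/"
        + "9e48e6f12fe9b1293921e76b13fdbb7f/A");
    try (FileSystem fs = FileSystem.get(familyDir.toUri(), new Configuration())) {
      for (FileStatus hfile : fs.listStatus(familyDir)) {
        // e.g. dee303a8daee426a9e47cb1f4171ea9c, about 30.2 K after the commit above
        System.out.println(hfile.getPath().getName() + "\t" + hfile.getLen() + " bytes");
      }
    }
  }
}
```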
2024-12-12T19:34:47,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/dee303a8daee426a9e47cb1f4171ea9c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c 2024-12-12T19:34:47,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c, entries=150, sequenceid=16, filesize=30.2 K 2024-12-12T19:34:47,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/cedab7db78d240bba776607200be2f21 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/cedab7db78d240bba776607200be2f21 2024-12-12T19:34:47,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:47,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032147459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:47,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032147464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032147466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:47,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032147467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/cedab7db78d240bba776607200be2f21, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T19:34:47,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:47,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032147472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/065ae5e8b62142af999bc03c1d35b7f5 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/065ae5e8b62142af999bc03c1d35b7f5 2024-12-12T19:34:47,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/065ae5e8b62142af999bc03c1d35b7f5, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T19:34:47,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1182ms, sequenceid=16, compaction requested=false 2024-12-12T19:34:47,487 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T19:34:47,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:47,581 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:47,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T19:34:47,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 
{event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:47,584 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T19:34:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:47,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122960061add0046778f8a922277e045df_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032086326/Put/seqid=0 2024-12-12T19:34:47,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742364_1540 (size=12154) 2024-12-12T19:34:47,816 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T19:34:48,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:48,095 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122960061add0046778f8a922277e045df_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412122960061add0046778f8a922277e045df_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:48,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/6be0701625234af6938b6fdc61a32911, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:48,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/6be0701625234af6938b6fdc61a32911 is 175, key is test_row_0/A:col10/1734032086326/Put/seqid=0 2024-12-12T19:34:48,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742365_1541 (size=30955) 2024-12-12T19:34:48,124 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/6be0701625234af6938b6fdc61a32911 2024-12-12T19:34:48,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/f364a43ae5ce4f73a5b11594bd110b6f is 50, key is test_row_0/B:col10/1734032086326/Put/seqid=0 2024-12-12T19:34:48,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742366_1542 (size=12001) 2024-12-12T19:34:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:48,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:48,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032148487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032148488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032148493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032148495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032148502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,567 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/f364a43ae5ce4f73a5b11594bd110b6f 2024-12-12T19:34:48,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032148600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032148605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/e6f738f6d1c34fa38ca2df27b68a692b is 50, key is test_row_0/C:col10/1734032086326/Put/seqid=0 2024-12-12T19:34:48,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032148617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032148617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032148614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742367_1543 (size=12001) 2024-12-12T19:34:48,675 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/e6f738f6d1c34fa38ca2df27b68a692b 2024-12-12T19:34:48,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/6be0701625234af6938b6fdc61a32911 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911 2024-12-12T19:34:48,739 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911, entries=150, sequenceid=41, filesize=30.2 K 2024-12-12T19:34:48,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/f364a43ae5ce4f73a5b11594bd110b6f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/f364a43ae5ce4f73a5b11594bd110b6f 2024-12-12T19:34:48,771 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/f364a43ae5ce4f73a5b11594bd110b6f, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T19:34:48,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/e6f738f6d1c34fa38ca2df27b68a692b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/e6f738f6d1c34fa38ca2df27b68a692b 2024-12-12T19:34:48,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032148808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,811 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/e6f738f6d1c34fa38ca2df27b68a692b, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T19:34:48,818 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1235ms, sequenceid=41, compaction requested=false 2024-12-12T19:34:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=150 2024-12-12T19:34:48,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=150 2024-12-12T19:34:48,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:34:48,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:48,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:48,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:48,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:48,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:48,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:48,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:48,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-12T19:34:48,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5280 sec 2024-12-12T19:34:48,826 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees in 2.5350 sec 2024-12-12T19:34:48,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124d395bc5d86d48a8b497c2bd94009f29_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:48,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032148881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032148883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742368_1544 (size=12154) 2024-12-12T19:34:48,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032148891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032148896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032148989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:48,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:48,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032148989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032149002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032149002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032149115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032149196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032149198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032149205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032149217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,287 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:49,304 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124d395bc5d86d48a8b497c2bd94009f29_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124d395bc5d86d48a8b497c2bd94009f29_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:49,309 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d8bcdb455a8f4d4184b4295ba7763f40, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:49,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d8bcdb455a8f4d4184b4295ba7763f40 is 175, key is test_row_0/A:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:49,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742369_1545 (size=30955) 2024-12-12T19:34:49,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032149502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032149503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032149511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032149552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032149619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:49,728 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d8bcdb455a8f4d4184b4295ba7763f40 2024-12-12T19:34:49,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/a692613c1db74f29b08fe8695a19d986 is 50, key is test_row_0/B:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:49,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742370_1546 (size=12001) 2024-12-12T19:34:50,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032150007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032150010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032150027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032150066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/a692613c1db74f29b08fe8695a19d986 2024-12-12T19:34:50,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/d7ecc28e444e4be28dd0d0f827bf547b is 50, key is test_row_0/C:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:50,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742371_1547 (size=12001) 2024-12-12T19:34:50,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/d7ecc28e444e4be28dd0d0f827bf547b 2024-12-12T19:34:50,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d8bcdb455a8f4d4184b4295ba7763f40 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40 2024-12-12T19:34:50,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40, entries=150, sequenceid=55, filesize=30.2 K 2024-12-12T19:34:50,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/a692613c1db74f29b08fe8695a19d986 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/a692613c1db74f29b08fe8695a19d986 2024-12-12T19:34:50,405 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/a692613c1db74f29b08fe8695a19d986, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T19:34:50,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/d7ecc28e444e4be28dd0d0f827bf547b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d7ecc28e444e4be28dd0d0f827bf547b 2024-12-12T19:34:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T19:34:50,420 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-12-12T19:34:50,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d7ecc28e444e4be28dd0d0f827bf547b, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T19:34:50,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1610ms, sequenceid=55, compaction requested=true 2024-12-12T19:34:50,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:50,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:50,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:50,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:50,429 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:50,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:50,429 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:50,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:50,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:50,430 DEBUG 
[RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:50,430 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/B is initiating minor compaction (all files) 2024-12-12T19:34:50,430 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/B in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:50,430 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/cedab7db78d240bba776607200be2f21, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/f364a43ae5ce4f73a5b11594bd110b6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/a692613c1db74f29b08fe8695a19d986] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=35.2 K 2024-12-12T19:34:50,431 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:50,431 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/A is initiating minor compaction (all files) 2024-12-12T19:34:50,431 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/A in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
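The two compaction threads above have each selected all three eligible store files (B at 36003 bytes total, A at 92865 bytes) for a minor compaction queued by MemStoreFlusher right after the flush. For illustration only, a minimal sketch of requesting and watching an equivalent compaction from a client, assuming the standard HBase 2.x Admin API; the table name comes from this log, while the polling loop and its interval are assumptions:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.compact(table); // ask the region servers to compact, as the flusher requested above
      // Poll the aggregate compaction state until no compaction is running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}

In this run the compactions are requested by the region server itself (CompactSplit, after the flush), so the client call above is only an analogy, not what the test does.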
2024-12-12T19:34:50,431 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=90.7 K 2024-12-12T19:34:50,431 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting cedab7db78d240bba776607200be2f21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734032086304 2024-12-12T19:34:50,431 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:50,431 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40] 2024-12-12T19:34:50,432 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:50,432 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting f364a43ae5ce4f73a5b11594bd110b6f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734032086325 2024-12-12T19:34:50,432 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting dee303a8daee426a9e47cb1f4171ea9c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734032086304 2024-12-12T19:34:50,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees 2024-12-12T19:34:50,433 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6be0701625234af6938b6fdc61a32911, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734032086325 
2024-12-12T19:34:50,433 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting a692613c1db74f29b08fe8695a19d986, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032088480 2024-12-12T19:34:50,433 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8bcdb455a8f4d4184b4295ba7763f40, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032088480 2024-12-12T19:34:50,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T19:34:50,437 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:50,438 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:50,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:50,440 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:50,442 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#B#compaction#463 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:50,443 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/18dc33f0d7ce4a6899fe7a4492681ccf is 50, key is test_row_0/B:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:50,443 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212033f86a7c817415eba3dd2a5e85e5ba9_9e48e6f12fe9b1293921e76b13fdbb7f store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:50,445 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212033f86a7c817415eba3dd2a5e85e5ba9_9e48e6f12fe9b1293921e76b13fdbb7f, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:50,445 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212033f86a7c817415eba3dd2a5e85e5ba9_9e48e6f12fe9b1293921e76b13fdbb7f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:50,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742373_1549 (size=4469) 2024-12-12T19:34:50,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742372_1548 (size=12104) 2024-12-12T19:34:50,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T19:34:50,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T19:34:50,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
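The flush region procedure now being dispatched (pid=152) is what eventually clears the "Over memstore limit=512.0 K" condition: HBase blocks updates to a region once its combined memstores exceed the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, and checkResources answers further Mutate calls with RegionTooBusyException until a flush brings the size back down. A hedged sketch of one configuration that would produce the 512 K limit seen in this log; the concrete values are assumptions, since the test's settings are not printed here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  // Returns a configuration whose per-region blocking limit works out to 512 KB.
  public static Configuration lowLimitConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore at 128 KB (assumed small test value, not read from this log).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block updates with RegionTooBusyException once a region's memstores reach
    // flush.size * block.multiplier = 128 KB * 4 = 512 KB, the limit reported above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}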
2024-12-12T19:34:50,595 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T19:34:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:50,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:50,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:50,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121226be882e61fd4e51b71640f33dea7ae6_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032088865/Put/seqid=0 2024-12-12T19:34:50,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742374_1550 (size=12154) 2024-12-12T19:34:50,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032150678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T19:34:50,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032150780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:50,869 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#A#compaction#462 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:50,870 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/9f3511a629b940379864f06b7407d2f3 is 175, key is test_row_0/A:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:50,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742375_1551 (size=31058) 2024-12-12T19:34:50,910 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/9f3511a629b940379864f06b7407d2f3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9f3511a629b940379864f06b7407d2f3 2024-12-12T19:34:50,916 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/18dc33f0d7ce4a6899fe7a4492681ccf as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/18dc33f0d7ce4a6899fe7a4492681ccf 2024-12-12T19:34:50,942 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/A of 9e48e6f12fe9b1293921e76b13fdbb7f into 9f3511a629b940379864f06b7407d2f3(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:50,942 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:50,942 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/A, priority=13, startTime=1734032090429; duration=0sec 2024-12-12T19:34:50,942 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:50,943 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A 2024-12-12T19:34:50,943 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:50,946 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/B of 9e48e6f12fe9b1293921e76b13fdbb7f into 18dc33f0d7ce4a6899fe7a4492681ccf(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
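While these flushes and compactions run, the writer threads keep receiving RegionTooBusyException on their Mutate calls (the recurring WARN/DEBUG pairs above and below) and simply try again once the region accepts writes. A minimal sketch of such a retrying put, assuming the standard HBase 2.x client API; the row, family, and qualifier mirror the test_row_0/A:col10 keys in this log, and the backoff policy (and whether the exception reaches the caller unwrapped, which depends on client retry settings) are assumptions:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  // conn is an open Connection, obtained as in the earlier sketches.
  static void putWithBackoff(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      while (true) {
        try {
          table.put(put);
          return; // accepted once the region is no longer over its memstore limit
        } catch (RegionTooBusyException e) {
          // Server-side blocking limit hit (the WARN entries in this log); back off and retry.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}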
2024-12-12T19:34:50,946 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:50,946 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/B, priority=13, startTime=1734032090429; duration=0sec 2024-12-12T19:34:50,946 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:50,946 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B 2024-12-12T19:34:50,946 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:50,946 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/C is initiating minor compaction (all files) 2024-12-12T19:34:50,946 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/C in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:50,946 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/065ae5e8b62142af999bc03c1d35b7f5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/e6f738f6d1c34fa38ca2df27b68a692b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d7ecc28e444e4be28dd0d0f827bf547b] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=35.2 K 2024-12-12T19:34:50,951 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 065ae5e8b62142af999bc03c1d35b7f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734032086304 2024-12-12T19:34:50,954 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6f738f6d1c34fa38ca2df27b68a692b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734032086325 2024-12-12T19:34:50,957 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7ecc28e444e4be28dd0d0f827bf547b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032088480 2024-12-12T19:34:50,978 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#C#compaction#465 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:50,979 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/500cf24034604fa9ad968f642f47ac9d is 50, key is test_row_0/C:col10/1734032088490/Put/seqid=0 2024-12-12T19:34:50,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:50,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032150986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:51,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032151011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742376_1552 (size=12104) 2024-12-12T19:34:51,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:51,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032151023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,035 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/500cf24034604fa9ad968f642f47ac9d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/500cf24034604fa9ad968f642f47ac9d 2024-12-12T19:34:51,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:51,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032151036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,042 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/C of 9e48e6f12fe9b1293921e76b13fdbb7f into 500cf24034604fa9ad968f642f47ac9d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:51,042 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:51,043 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/C, priority=13, startTime=1734032090430; duration=0sec 2024-12-12T19:34:51,043 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:51,043 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C 2024-12-12T19:34:51,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T19:34:51,055 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121226be882e61fd4e51b71640f33dea7ae6_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121226be882e61fd4e51b71640f33dea7ae6_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:51,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/295b57c10dbf46f49986795cee95078c, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:51,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/295b57c10dbf46f49986795cee95078c is 175, key is test_row_0/A:col10/1734032088865/Put/seqid=0 2024-12-12T19:34:51,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:51,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032151083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742377_1553 (size=30955) 2024-12-12T19:34:51,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:51,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032151303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,497 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/295b57c10dbf46f49986795cee95078c 2024-12-12T19:34:51,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/03c6edbf1eec44d491e571ba621acd19 is 50, key is test_row_0/B:col10/1734032088865/Put/seqid=0 2024-12-12T19:34:51,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742378_1554 (size=12001) 2024-12-12T19:34:51,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T19:34:51,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032151808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:51,939 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/03c6edbf1eec44d491e571ba621acd19 2024-12-12T19:34:51,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/740174a23489423bb0f9acadd35e2cca is 50, key is test_row_0/C:col10/1734032088865/Put/seqid=0 2024-12-12T19:34:51,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742379_1555 (size=12001) 2024-12-12T19:34:52,395 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/740174a23489423bb0f9acadd35e2cca 2024-12-12T19:34:52,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/295b57c10dbf46f49986795cee95078c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c 2024-12-12T19:34:52,404 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c, entries=150, sequenceid=78, filesize=30.2 K 2024-12-12T19:34:52,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/03c6edbf1eec44d491e571ba621acd19 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/03c6edbf1eec44d491e571ba621acd19 2024-12-12T19:34:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,433 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/03c6edbf1eec44d491e571ba621acd19, entries=150, sequenceid=78, filesize=11.7 K 2024-12-12T19:34:52,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/740174a23489423bb0f9acadd35e2cca as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/740174a23489423bb0f9acadd35e2cca 2024-12-12T19:34:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,446 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/740174a23489423bb0f9acadd35e2cca, entries=150, sequenceid=78, filesize=11.7 K 2024-12-12T19:34:52,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,448 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,449 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1855ms, sequenceid=78, compaction requested=false 2024-12-12T19:34:52,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:52,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:52,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-12-12T19:34:52,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-12-12T19:34:52,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 
2024-12-12T19:34:52,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0250 sec 2024-12-12T19:34:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,508 DEBUG 
2024-12-12T19:34:52,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 2.0760 sec
2024-12-12T19:34:52,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151
2024-12-12T19:34:52,553 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed
2024-12-12T19:34:52,557 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T19:34:52,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees
2024-12-12T19:34:52,559 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-12-12T19:34:52,559 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer
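Annotation (not part of the captured log): the HMaster record above shows the jenkins client asking for a flush of TestAcidGuarantees, and the earlier HBaseAdmin$TableFuture record shows the previous flush (procId 151) completing on the client side. A minimal, self-contained sketch of the client call that drives this round trip, assuming the standard HBase Java client API; Admin.flush submits the master-side flush procedure and waits for it to finish.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Connect using whatever hbase-site.xml is on the classpath.
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Ask the master to flush every region of the table; this is the kind of call
      // that surfaces as "flush TestAcidGuarantees" and later "Operation: FLUSH ... completed".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}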
2024-12-12T19:34:52,559 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T19:34:52,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153
2024-12-12T19:34:52,560 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T19:34:52,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
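Annotation (not part of the captured log): the PEWorker-5 records above show the table-level flush procedure stepping from FLUSH_TABLE_PREPARE to FLUSH_TABLE_FLUSH_REGIONS and fanning out a FlushRegionProcedure sub-procedure. A purely illustrative sketch of that two-step state machine follows; the enum, class, and region names below are ours, not HBase internals.

import java.util.List;

public class FlushTableStateSketch {
  // Illustrative states mirroring the state names seen in the log.
  enum State { FLUSH_TABLE_PREPARE, FLUSH_TABLE_FLUSH_REGIONS, SUCCESS }

  public static void main(String[] args) {
    State state = State.FLUSH_TABLE_PREPARE;
    List<String> regions = List.of("region-1", "region-2"); // placeholder region names
    while (state != State.SUCCESS) {
      switch (state) {
        case FLUSH_TABLE_PREPARE:
          // Validate the table (exists, enabled) before flushing anything.
          state = State.FLUSH_TABLE_FLUSH_REGIONS;
          break;
        case FLUSH_TABLE_FLUSH_REGIONS:
          // Fan out one region-level flush "sub-procedure" per region, then finish.
          regions.forEach(r -> System.out.println("flush sub-procedure for " + r));
          state = State.SUCCESS;
          break;
        default:
          state = State.SUCCESS;
      }
    }
    System.out.println("procedure finished: " + state);
  }
}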
2024-12-12T19:34:52,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): 
2024-12-12T19:34:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153
2024-12-12T19:34:52,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
2024-12-12T19:34:52,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:52,717 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C
2024-12-12T19:34:52,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:34:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121213176b6c40c9427fa346f2dd7aaff344_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032090675/Put/seqid=0
2024-12-12T19:34:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742380_1556 (size=9714) 2024-12-12T19:34:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,777 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,781 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,788 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,789 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121213176b6c40c9427fa346f2dd7aaff344_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121213176b6c40c9427fa346f2dd7aaff344_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:52,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/9780a8dd51f546b9b6b8795c5fe18e24, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:52,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/9780a8dd51f546b9b6b8795c5fe18e24 is 175, key is test_row_0/A:col10/1734032090675/Put/seqid=0 2024-12-12T19:34:52,796 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,809 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742381_1557 (size=22361) 2024-12-12T19:34:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:52,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
as already flushing 2024-12-12T19:34:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T19:34:52,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:52,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:52,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032152911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032153016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032153020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,023 DEBUG [Thread-2388 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:53,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032153030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,032 DEBUG [Thread-2386 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:53,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032153058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,063 DEBUG [Thread-2390 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:53,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032153111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,115 DEBUG [Thread-2384 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4224 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T19:34:53,218 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/9780a8dd51f546b9b6b8795c5fe18e24 2024-12-12T19:34:53,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032153226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/1697b2d886524decae15e22fff838dda is 50, key is test_row_0/B:col10/1734032090675/Put/seqid=0 2024-12-12T19:34:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742382_1558 (size=9657) 2024-12-12T19:34:53,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:53,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032153536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:53,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T19:34:53,687 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/1697b2d886524decae15e22fff838dda 2024-12-12T19:34:53,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/542f862409914b109b56a0e49c0813cc is 50, key is test_row_0/C:col10/1734032090675/Put/seqid=0 2024-12-12T19:34:53,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742383_1559 (size=9657) 2024-12-12T19:34:54,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:54,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032154045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:54,171 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/542f862409914b109b56a0e49c0813cc 2024-12-12T19:34:54,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/9780a8dd51f546b9b6b8795c5fe18e24 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24 2024-12-12T19:34:54,300 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24, entries=100, sequenceid=94, filesize=21.8 K 2024-12-12T19:34:54,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/1697b2d886524decae15e22fff838dda as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1697b2d886524decae15e22fff838dda 2024-12-12T19:34:54,371 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1697b2d886524decae15e22fff838dda, entries=100, sequenceid=94, filesize=9.4 K 2024-12-12T19:34:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/542f862409914b109b56a0e49c0813cc as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/542f862409914b109b56a0e49c0813cc 2024-12-12T19:34:54,409 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/542f862409914b109b56a0e49c0813cc, entries=100, sequenceid=94, filesize=9.4 K 2024-12-12T19:34:54,419 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1702ms, sequenceid=94, compaction requested=true 2024-12-12T19:34:54,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:54,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:54,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-12T19:34:54,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-12T19:34:54,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-12T19:34:54,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8750 sec 2024-12-12T19:34:54,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 1.8830 sec 2024-12-12T19:34:54,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T19:34:54,666 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-12T19:34:54,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:54,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-12T19:34:54,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T19:34:54,670 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=155, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:54,672 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:54,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:54,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T19:34:54,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:54,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:54,826 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:54,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:54,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cb019164df1942ea90ea169d628781b6_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032092907/Put/seqid=0 2024-12-12T19:34:54,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35221 is added to blk_1073742384_1560 (size=12154) 2024-12-12T19:34:54,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:54,894 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cb019164df1942ea90ea169d628781b6_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cb019164df1942ea90ea169d628781b6_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:54,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2af186e57912456e88707058631d4a5b, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:54,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2af186e57912456e88707058631d4a5b is 175, key is test_row_0/A:col10/1734032092907/Put/seqid=0 2024-12-12T19:34:54,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742385_1561 (size=30955) 2024-12-12T19:34:54,907 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2af186e57912456e88707058631d4a5b 2024-12-12T19:34:54,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/8fa05eebfa5742bea58ceed677b7f376 is 50, key is test_row_0/B:col10/1734032092907/Put/seqid=0 2024-12-12T19:34:54,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T19:34:54,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742386_1562 (size=12001) 2024-12-12T19:34:54,979 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/8fa05eebfa5742bea58ceed677b7f376 2024-12-12T19:34:55,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/f410c456c1864c75a79630c06e37b155 is 50, key is test_row_0/C:col10/1734032092907/Put/seqid=0 2024-12-12T19:34:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742387_1563 (size=12001) 2024-12-12T19:34:55,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:55,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:55,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032155136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:55,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:55,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032155247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:55,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T19:34:55,451 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/f410c456c1864c75a79630c06e37b155 2024-12-12T19:34:55,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032155452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:55,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2af186e57912456e88707058631d4a5b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b 2024-12-12T19:34:55,540 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b, entries=150, sequenceid=117, filesize=30.2 K 2024-12-12T19:34:55,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/8fa05eebfa5742bea58ceed677b7f376 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/8fa05eebfa5742bea58ceed677b7f376 2024-12-12T19:34:55,584 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/8fa05eebfa5742bea58ceed677b7f376, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T19:34:55,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/f410c456c1864c75a79630c06e37b155 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/f410c456c1864c75a79630c06e37b155 2024-12-12T19:34:55,623 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/f410c456c1864c75a79630c06e37b155, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T19:34:55,624 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9e48e6f12fe9b1293921e76b13fdbb7f in 797ms, sequenceid=117, compaction requested=true 2024-12-12T19:34:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-12T19:34:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-12T19:34:55,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-12T19:34:55,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 960 msec 2024-12-12T19:34:55,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 967 msec 2024-12-12T19:34:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:55,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T19:34:55,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:55,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:55,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:55,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:55,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:55,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:55,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T19:34:55,779 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-12T19:34:55,784 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-12T19:34:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T19:34:55,788 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:55,789 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:55,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:55,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212707984fa84e24609aca1879060042e89_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032095108/Put/seqid=0 2024-12-12T19:34:55,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742388_1564 (size=14694) 2024-12-12T19:34:55,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T19:34:55,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:55,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032155907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:55,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:55,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:55,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:55,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:55,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:55,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:55,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032156015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T19:34:56,111 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:56,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:56,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:56,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:56,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032156224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,228 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:56,233 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212707984fa84e24609aca1879060042e89_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212707984fa84e24609aca1879060042e89_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:56,235 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/4bf9d5c8a6d14cbfb41d117ded95da34, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:56,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/4bf9d5c8a6d14cbfb41d117ded95da34 is 175, key is test_row_0/A:col10/1734032095108/Put/seqid=0 2024-12-12T19:34:56,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742389_1565 (size=39649) 2024-12-12T19:34:56,256 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/4bf9d5c8a6d14cbfb41d117ded95da34 2024-12-12T19:34:56,279 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:56,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:56,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/d9ba14436c214541898393449b8134fe is 50, key is test_row_0/B:col10/1734032095108/Put/seqid=0 2024-12-12T19:34:56,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742390_1566 (size=12101) 2024-12-12T19:34:56,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T19:34:56,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:56,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:56,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:56,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032156527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,603 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/d9ba14436c214541898393449b8134fe 2024-12-12T19:34:56,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:56,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/15021bee9ead454591e3bc7d0e44c057 is 50, key is test_row_0/C:col10/1734032095108/Put/seqid=0 2024-12-12T19:34:56,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742391_1567 (size=12101) 2024-12-12T19:34:56,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T19:34:56,919 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:56,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:56,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:56,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:56,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:56,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:56,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:57,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:57,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032157038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:57,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49102 deadline: 1734032157042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,047 DEBUG [Thread-2386 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:57,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:57,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49074 deadline: 1734032157058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,059 DEBUG [Thread-2388 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:57,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:57,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:57,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:57,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:57,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:57,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:57,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:57,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:57,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49114 deadline: 1734032157087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,090 DEBUG [Thread-2390 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8195 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:57,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:57,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49048 deadline: 1734032157142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,149 DEBUG [Thread-2384 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8258 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., hostname=4c9c438b6eeb,42689,1734031923038, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T19:34:57,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), 
to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/15021bee9ead454591e3bc7d0e44c057 2024-12-12T19:34:57,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/4bf9d5c8a6d14cbfb41d117ded95da34 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34 2024-12-12T19:34:57,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34, entries=200, sequenceid=131, filesize=38.7 K 2024-12-12T19:34:57,238 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:57,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:57,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/d9ba14436c214541898393449b8134fe as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/d9ba14436c214541898393449b8134fe 2024-12-12T19:34:57,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:57,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/d9ba14436c214541898393449b8134fe, entries=150, sequenceid=131, filesize=11.8 K 2024-12-12T19:34:57,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/15021bee9ead454591e3bc7d0e44c057 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/15021bee9ead454591e3bc7d0e44c057 2024-12-12T19:34:57,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/15021bee9ead454591e3bc7d0e44c057, entries=150, sequenceid=131, filesize=11.8 K 2024-12-12T19:34:57,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1537ms, sequenceid=131, compaction requested=true 2024-12-12T19:34:57,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:57,308 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T19:34:57,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:57,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:57,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:57,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:57,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:57,310 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:34:57,310 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T19:34:57,321 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 154978 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T19:34:57,321 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/A is initiating minor compaction (all files) 2024-12-12T19:34:57,321 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/A in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:57,322 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9f3511a629b940379864f06b7407d2f3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=151.3 K 2024-12-12T19:34:57,322 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:57,322 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9f3511a629b940379864f06b7407d2f3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34] 2024-12-12T19:34:57,323 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f3511a629b940379864f06b7407d2f3, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032088480 2024-12-12T19:34:57,323 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 295b57c10dbf46f49986795cee95078c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734032088865 2024-12-12T19:34:57,324 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 9780a8dd51f546b9b6b8795c5fe18e24, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734032090663 2024-12-12T19:34:57,325 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57864 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T19:34:57,326 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/B is initiating minor compaction (all files) 2024-12-12T19:34:57,326 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/B in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:57,326 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/18dc33f0d7ce4a6899fe7a4492681ccf, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/03c6edbf1eec44d491e571ba621acd19, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1697b2d886524decae15e22fff838dda, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/8fa05eebfa5742bea58ceed677b7f376, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/d9ba14436c214541898393449b8134fe] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=56.5 K 2024-12-12T19:34:57,327 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 2af186e57912456e88707058631d4a5b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734032092894 2024-12-12T19:34:57,328 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bf9d5c8a6d14cbfb41d117ded95da34, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734032095108 2024-12-12T19:34:57,330 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18dc33f0d7ce4a6899fe7a4492681ccf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032088480 2024-12-12T19:34:57,335 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03c6edbf1eec44d491e571ba621acd19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734032088865 2024-12-12T19:34:57,338 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1697b2d886524decae15e22fff838dda, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734032090663 2024-12-12T19:34:57,339 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fa05eebfa5742bea58ceed677b7f376, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734032092894 2024-12-12T19:34:57,342 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9ba14436c214541898393449b8134fe, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734032095108 2024-12-12T19:34:57,360 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:57,365 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e20241212122d3e0414634d71816f1391f2b86daa_9e48e6f12fe9b1293921e76b13fdbb7f store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:57,368 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212122d3e0414634d71816f1391f2b86daa_9e48e6f12fe9b1293921e76b13fdbb7f, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:57,368 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212122d3e0414634d71816f1391f2b86daa_9e48e6f12fe9b1293921e76b13fdbb7f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:57,387 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#B#compaction#478 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:57,387 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/235b09b7413c48ce982c38238d24c91b is 50, key is test_row_0/B:col10/1734032095108/Put/seqid=0 2024-12-12T19:34:57,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:57,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T19:34:57,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:57,392 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T19:34:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:57,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742393_1569 (size=12375) 2024-12-12T19:34:57,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742392_1568 (size=4469) 2024-12-12T19:34:57,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120fbea9a1c51c40dfba64a48141c4ba9b_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032095871/Put/seqid=0 2024-12-12T19:34:57,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742394_1570 (size=12304) 2024-12-12T19:34:57,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,513 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120fbea9a1c51c40dfba64a48141c4ba9b_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120fbea9a1c51c40dfba64a48141c4ba9b_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:57,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/3c6ec2374bb44018801d737f671c6899, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:57,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/3c6ec2374bb44018801d737f671c6899 is 175, key is test_row_0/A:col10/1734032095871/Put/seqid=0 2024-12-12T19:34:57,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742395_1571 (size=31105) 2024-12-12T19:34:57,563 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=153, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/3c6ec2374bb44018801d737f671c6899 2024-12-12T19:34:57,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/eba9d637348240a59597119a113b9108 is 50, key is test_row_0/B:col10/1734032095871/Put/seqid=0 2024-12-12T19:34:57,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742396_1572 (size=12151) 2024-12-12T19:34:57,617 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/eba9d637348240a59597119a113b9108 2024-12-12T19:34:57,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/686c4fa008ab43849f238bf4b6bcf305 is 50, key is test_row_0/C:col10/1734032095871/Put/seqid=0 2024-12-12T19:34:57,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742397_1573 (size=12151) 2024-12-12T19:34:57,659 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/686c4fa008ab43849f238bf4b6bcf305 2024-12-12T19:34:57,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/3c6ec2374bb44018801d737f671c6899 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899 2024-12-12T19:34:57,676 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899, entries=150, sequenceid=153, filesize=30.4 K 2024-12-12T19:34:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/eba9d637348240a59597119a113b9108 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/eba9d637348240a59597119a113b9108 2024-12-12T19:34:57,706 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/eba9d637348240a59597119a113b9108, entries=150, sequenceid=153, filesize=11.9 K 2024-12-12T19:34:57,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/686c4fa008ab43849f238bf4b6bcf305 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/686c4fa008ab43849f238bf4b6bcf305 2024-12-12T19:34:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,715 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/686c4fa008ab43849f238bf4b6bcf305, entries=150, sequenceid=153, filesize=11.9 K 2024-12-12T19:34:57,715 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,716 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 9e48e6f12fe9b1293921e76b13fdbb7f in 325ms, sequenceid=153, compaction requested=true 2024-12-12T19:34:57,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:57,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
2024-12-12T19:34:57,720-57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated across handlers 0-2 throughout this interval)
2024-12-12T19:34:57,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157
2024-12-12T19:34:57,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9320 sec
2024-12-12T19:34:57,730 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.9440 sec
2024-12-12T19:34:57,729-57,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated across handlers 0-2 throughout this interval)
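The block above is dominated by one repeated DEBUG entry: every RPC handled in this window re-resolves the store file tracker and gets the default implementation, while the two SUCCESS lines mark the flush of TestAcidGuarantees completing (FlushRegionProcedure pid=158 under FlushTableProcedure pid=157). A minimal client-side sketch of those two pieces, using only the public HBase client API: the class name is invented for illustration, the table name comes from the log, and hbase.store.file-tracker.impl is the documented store-file-tracking property rather than anything this test is known to set.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Store file tracking is selected per store through this property (HBase 2.5+);
        // "DEFAULT" corresponds to the DefaultStoreFileTracker instantiated in the log.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush the table; in this log the flush ran as
          // FlushTableProcedure pid=157 with a FlushRegionProcedure subprocedure (pid=158).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Whether a client flush takes the procedure-v2 path shown in the log depends on the HBase version in use; the pid/ppid pairing is what ties the two completion lines together.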
2024-12-12T19:34:57,825-57,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated across handlers 0-2)
2024-12-12T19:34:57,831 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/235b09b7413c48ce982c38238d24c91b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/235b09b7413c48ce982c38238d24c91b
2024-12-12T19:34:57,832 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#A#compaction#477 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T19:34:57,832 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/e69a72d8a3cc4c47abafafb2dafc7a2f is 175, key is test_row_0/A:col10/1734032095108/Put/seqid=0
2024-12-12T19:34:57,832-57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated across handlers 0-2)
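The PressureAwareThroughputController line reports the A-family compaction writing at 0.05 MB/second against a total limit of 50.00 MB/second, the bound the region server applies when there is little store-file pressure. A hedged sketch of the knobs involved; the property names below are the standard compaction-throughput settings from the HBase configuration reference and are assumptions here, not values read from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Bounds (bytes/second) between which the pressure-aware controller scales the
        // allowed compaction write rate; with no pressure the limit sits near the lower
        // bound, which lines up with the "total limit is 50.00 MB/second" reported above.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction throughput lower bound (bytes/s): "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1));
      }
    }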
2024-12-12T19:34:57,841-57,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated across handlers 0-2)
2024-12-12T19:34:57,846 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/B of 9e48e6f12fe9b1293921e76b13fdbb7f into 235b09b7413c48ce982c38238d24c91b(size=12.1 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T19:34:57,846 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f:
2024-12-12T19:34:57,846 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/B, priority=11, startTime=1734032097310; duration=0sec
2024-12-12T19:34:57,846 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T19:34:57,846 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B
2024-12-12T19:34:57,847 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking
2024-12-12T19:34:57,847-57,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated)
2024-12-12T19:34:57,848 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 70015 starting at candidate #0 after considering 10 permutations with 10 in ratio
2024-12-12T19:34:57,848 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/C is initiating minor compaction (all files)
2024-12-12T19:34:57,848 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/C in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:57,849 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/500cf24034604fa9ad968f642f47ac9d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/740174a23489423bb0f9acadd35e2cca, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/542f862409914b109b56a0e49c0813cc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/f410c456c1864c75a79630c06e37b155, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/15021bee9ead454591e3bc7d0e44c057, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/686c4fa008ab43849f238bf4b6bcf305] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=68.4 K
2024-12-12T19:34:57,851 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 500cf24034604fa9ad968f642f47ac9d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734032088480
2024-12-12T19:34:57,852 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 740174a23489423bb0f9acadd35e2cca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734032088865
2024-12-12T19:34:57,853 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 542f862409914b109b56a0e49c0813cc, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734032090663
2024-12-12T19:34:57,854 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting f410c456c1864c75a79630c06e37b155, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734032092894
2024-12-12T19:34:57,855 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15021bee9ead454591e3bc7d0e44c057, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734032095108
2024-12-12T19:34:57,855 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 686c4fa008ab43849f238bf4b6bcf305, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1734032095871
2024-12-12T19:34:57,850-57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated, interleaved with the Compactor entries above)
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742398_1574 (size=31329) 2024-12-12T19:34:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T19:34:57,890 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-12T19:34:57,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:57,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-12T19:34:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T19:34:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T19:34:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,905 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,911 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:57,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,918 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#C#compaction#482 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:57,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,918 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/3b65ee521e684c5caf909f1a4768384d is 50, key is test_row_0/C:col10/1734032095871/Put/seqid=0 2024-12-12T19:34:57,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,922 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,930 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,955 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,969 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,974 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,977 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,980 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742399_1575 (size=12459) 2024-12-12T19:34:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:57,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T19:34:58,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,070 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-12T19:34:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:58,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
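The entries just above show the master's ServerManager opening an admin connection to the regionserver at 4c9c438b6eeb,42689 and that regionserver executing the remote FlushRegionCallable for pid=160 against region 9e48e6f12fe9b1293921e76b13fdbb7f of TestAcidGuarantees; the empty "Flush status journal" line suggests the region had nothing to flush, and the completion report for pid=160 and the parent FlushTableProcedure (pid=159) finishing with SUCCESS appear a few entries further down. The repeated "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" DEBUG entries throughout this stretch come from StoreFileTrackerFactory on the RPC handler threads serving these requests. The log does not show how the flush was requested; as a minimal client-side sketch, assuming the standard HBase client API (the connection setup and the class name FlushTableExample are illustrative, only the table name is taken from the log), such a table flush is typically triggered like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            // Illustrative setup: picks up whatever hbase-site.xml is on the classpath.
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; on the master this
                // surfaces as a FlushTableProcedure with one FlushRegionProcedure child
                // per region, matching pid=159/pid=160 in the surrounding log entries.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The same flush can also be issued interactively from the HBase shell with: flush 'TestAcidGuarantees'.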
2024-12-12T19:34:58,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-12T19:34:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-12T19:34:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-12T19:34:58,083 
INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 161 msec 2024-12-12T19:34:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 193 msec 2024-12-12T19:34:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:58,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T19:34:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:58,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T19:34:58,208 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-12T19:34:58,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,209 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,212 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:34:58,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-12T19:34:58,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T19:34:58,219 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:34:58,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,228 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:34:58,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:34:58,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e6d82d50976149adbe0e4218f79bcfb0_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032098155/Put/seqid=0 2024-12-12T19:34:58,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742400_1576 (size=24758) 2024-12-12T19:34:58,275 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:58,304 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e6d82d50976149adbe0e4218f79bcfb0_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e6d82d50976149adbe0e4218f79bcfb0_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:58,316 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/418abfb6b1d8438d9746addc0e11ab4d, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 
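[editor's note] The HMobStore/DefaultMobStoreFlusher records above show cells from family A being flushed into the mobdir rather than the regular store. A minimal sketch of how such a MOB-enabled table could be declared with the standard HBase 2.x client API follows; the table and family names mirror the log, but the threshold value and class name are illustrative assumptions, not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMobEnabled(true)   // cells above the threshold are written to the MOB store
                .setMobThreshold(4L)   // assumed tiny threshold so small test cells land in mobdir
                .build());
      }
      admin.createTable(table.build());
    }
  }
}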
2024-12-12T19:34:58,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/418abfb6b1d8438d9746addc0e11ab4d is 175, key is test_row_0/A:col10/1734032098155/Put/seqid=0 2024-12-12T19:34:58,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T19:34:58,320 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/e69a72d8a3cc4c47abafafb2dafc7a2f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e69a72d8a3cc4c47abafafb2dafc7a2f 2024-12-12T19:34:58,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742401_1577 (size=74395) 2024-12-12T19:34:58,358 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=166, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/418abfb6b1d8438d9746addc0e11ab4d 2024-12-12T19:34:58,360 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/A of 9e48e6f12fe9b1293921e76b13fdbb7f into e69a72d8a3cc4c47abafafb2dafc7a2f(size=30.6 K), total size for store is 61.0 K. This selection was in queue for 0sec, and took 1sec to execute. 2024-12-12T19:34:58,360 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:58,360 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/A, priority=11, startTime=1734032097308; duration=1sec 2024-12-12T19:34:58,360 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:58,360 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A 2024-12-12T19:34:58,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:58,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
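[editor's note] The FlushTableProcedure (pid=161) and the repeated "Checking to see if procedure is done" records above are produced by a client-initiated table flush. A minimal sketch, assuming only the public HBase 2.x Admin API, of the call that triggers this flow; Admin.flush blocks until the master reports the flush procedure complete.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush of every region of the table; the master runs a
      // FlushTableProcedure and the client polls until the procedure is done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}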
2024-12-12T19:34:58,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:58,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032158419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,439 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/3b65ee521e684c5caf909f1a4768384d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/3b65ee521e684c5caf909f1a4768384d 2024-12-12T19:34:58,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/b8a2a251cd9346e9bff6ce22ed69812d is 50, key is test_row_0/B:col10/1734032098155/Put/seqid=0 2024-12-12T19:34:58,467 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/C of 9e48e6f12fe9b1293921e76b13fdbb7f into 3b65ee521e684c5caf909f1a4768384d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
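[editor's note] The RegionTooBusyException above ("Over memstore limit=512.0 K") is raised when a write arrives while the region's memstore is above its blocking limit; the HBase client treats it as retryable and backs off. A minimal sketch of a client write under explicit retry settings; the row, family, and qualifier mirror the log, while the retry count, pause, and value are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15); // retries cover transient RegionTooBusyException
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms between attempts
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks through client-side retries until the region accepts the write
      // or the retry budget is exhausted.
      table.put(put);
    }
  }
}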
2024-12-12T19:34:58,467 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:58,467 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/C, priority=10, startTime=1734032097310; duration=0sec 2024-12-12T19:34:58,467 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:58,467 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C 2024-12-12T19:34:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742402_1578 (size=12151) 2024-12-12T19:34:58,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/b8a2a251cd9346e9bff6ce22ed69812d 2024-12-12T19:34:58,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T19:34:58,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/7527a602019f41b695160f0b80f1abe6 is 50, key is test_row_0/C:col10/1734032098155/Put/seqid=0 2024-12-12T19:34:58,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032158534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:58,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:58,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:58,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742403_1579 (size=12151) 2024-12-12T19:34:58,727 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:58,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032158737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T19:34:58,891 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:58,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:58,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:58,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:58,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/7527a602019f41b695160f0b80f1abe6 2024-12-12T19:34:58,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/418abfb6b1d8438d9746addc0e11ab4d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d 2024-12-12T19:34:59,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d, entries=400, sequenceid=166, filesize=72.7 K 2024-12-12T19:34:59,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/b8a2a251cd9346e9bff6ce22ed69812d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/b8a2a251cd9346e9bff6ce22ed69812d 2024-12-12T19:34:59,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/b8a2a251cd9346e9bff6ce22ed69812d, entries=150, 
sequenceid=166, filesize=11.9 K 2024-12-12T19:34:59,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/7527a602019f41b695160f0b80f1abe6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7527a602019f41b695160f0b80f1abe6 2024-12-12T19:34:59,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7527a602019f41b695160f0b80f1abe6, entries=150, sequenceid=166, filesize=11.9 K 2024-12-12T19:34:59,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9e48e6f12fe9b1293921e76b13fdbb7f in 862ms, sequenceid=166, compaction requested=true 2024-12-12T19:34:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:34:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:59,044 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:34:59,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:34:59,045 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:34:59,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:34:59,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:59,045 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:59,045 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/A is initiating minor compaction (all files) 2024-12-12T19:34:59,045 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/A in 
TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,046 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e69a72d8a3cc4c47abafafb2dafc7a2f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=133.6 K 2024-12-12T19:34:59,046 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,046 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e69a72d8a3cc4c47abafafb2dafc7a2f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d] 2024-12-12T19:34:59,046 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:34:59,046 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/B is initiating minor compaction (all files) 2024-12-12T19:34:59,046 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/B in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:59,046 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/235b09b7413c48ce982c38238d24c91b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/eba9d637348240a59597119a113b9108, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/b8a2a251cd9346e9bff6ce22ed69812d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=35.8 K 2024-12-12T19:34:59,046 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting e69a72d8a3cc4c47abafafb2dafc7a2f, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734032095108 2024-12-12T19:34:59,046 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 235b09b7413c48ce982c38238d24c91b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734032095108 2024-12-12T19:34:59,047 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting eba9d637348240a59597119a113b9108, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1734032095871 2024-12-12T19:34:59,047 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c6ec2374bb44018801d737f671c6899, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1734032095871 2024-12-12T19:34:59,048 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b8a2a251cd9346e9bff6ce22ed69812d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734032098155 2024-12-12T19:34:59,048 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 418abfb6b1d8438d9746addc0e11ab4d, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734032098091 2024-12-12T19:34:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:59,049 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T19:34:59,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:59,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:59,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:59,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:34:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:34:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:34:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:34:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:34:59,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212fd1e6183d0d440b0863d29affe9cd464_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032099048/Put/seqid=0 2024-12-12T19:34:59,075 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:59,076 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#B#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:34:59,076 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/bdbab00ebc8844638bed16475c3ba0da is 50, key is test_row_0/B:col10/1734032098155/Put/seqid=0 2024-12-12T19:34:59,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032159111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742404_1580 (size=12304) 2024-12-12T19:34:59,124 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121256ed7404f7c441cc9af65ed6ed541d1e_9e48e6f12fe9b1293921e76b13fdbb7f store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:59,127 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:34:59,127 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121256ed7404f7c441cc9af65ed6ed541d1e_9e48e6f12fe9b1293921e76b13fdbb7f, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:59,127 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121256ed7404f7c441cc9af65ed6ed541d1e_9e48e6f12fe9b1293921e76b13fdbb7f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:59,162 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212fd1e6183d0d440b0863d29affe9cd464_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212fd1e6183d0d440b0863d29affe9cd464_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:34:59,164 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/ef98435d8fbe4c4dbb306487d441268e, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:34:59,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/ef98435d8fbe4c4dbb306487d441268e is 175, key is test_row_0/A:col10/1734032099048/Put/seqid=0 2024-12-12T19:34:59,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742405_1581 (size=12527) 2024-12-12T19:34:59,195 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/bdbab00ebc8844638bed16475c3ba0da as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bdbab00ebc8844638bed16475c3ba0da 2024-12-12T19:34:59,204 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/B of 9e48e6f12fe9b1293921e76b13fdbb7f into bdbab00ebc8844638bed16475c3ba0da(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:34:59,204 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:34:59,204 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/B, priority=13, startTime=1734032099044; duration=0sec 2024-12-12T19:34:59,204 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:34:59,204 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B 2024-12-12T19:34:59,204 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-12T19:34:59,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:59,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:59,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:34:59,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,219 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-12T19:34:59,219 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-12T19:34:59,219 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. because compaction request was cancelled 2024-12-12T19:34:59,219 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C 2024-12-12T19:34:59,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742407_1583 (size=31105) 2024-12-12T19:34:59,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032159232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742406_1582 (size=4469) 2024-12-12T19:34:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T19:34:59,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:59,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:34:59,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:34:59,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032159439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:34:59,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T19:34:59,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
as already flushing 2024-12-12T19:34:59,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:34:59,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:34:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,630 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/ef98435d8fbe4c4dbb306487d441268e
2024-12-12T19:34:59,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/bb20be55cfa440e39cfe94bb42f66971 is 50, key is test_row_0/B:col10/1734032099048/Put/seqid=0
2024-12-12T19:34:59,648 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#A#compaction#488 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T19:34:59,648 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/cc1ed44478f6442a9834018235fc0e36 is 175, key is test_row_0/A:col10/1734032098155/Put/seqid=0
2024-12-12T19:34:59,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
2024-12-12T19:34:59,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162
2024-12-12T19:34:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing
2024-12-12T19:34:59,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:59,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162
java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162
java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742408_1584 (size=12151)
2024-12-12T19:34:59,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/bb20be55cfa440e39cfe94bb42f66971
2024-12-12T19:34:59,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742409_1585 (size=31481)
2024-12-12T19:34:59,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/93a35425e3ef41fbaf5dada52fe4d8c3 is 50, key is test_row_0/C:col10/1734032099048/Put/seqid=0
2024-12-12T19:34:59,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:34:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032159745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:34:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742410_1586 (size=12151)
2024-12-12T19:34:59,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/93a35425e3ef41fbaf5dada52fe4d8c3
2024-12-12T19:34:59,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/ef98435d8fbe4c4dbb306487d441268e as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e
2024-12-12T19:34:59,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e, entries=150, sequenceid=194, filesize=30.4 K
2024-12-12T19:34:59,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/bb20be55cfa440e39cfe94bb42f66971 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bb20be55cfa440e39cfe94bb42f66971
2024-12-12T19:34:59,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bb20be55cfa440e39cfe94bb42f66971, entries=150, sequenceid=194, filesize=11.9 K
2024-12-12T19:34:59,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/93a35425e3ef41fbaf5dada52fe4d8c3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/93a35425e3ef41fbaf5dada52fe4d8c3
2024-12-12T19:34:59,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/93a35425e3ef41fbaf5dada52fe4d8c3, entries=150, sequenceid=194, filesize=11.9 K
2024-12-12T19:34:59,839 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
2024-12-12T19:34:59,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162
2024-12-12T19:34:59,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:59,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing
2024-12-12T19:34:59,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:59,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162
java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162
java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9e48e6f12fe9b1293921e76b13fdbb7f in 792ms, sequenceid=194, compaction requested=true
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f:
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3
2024-12-12T19:34:59,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0
2024-12-12T19:34:59,843 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking
2024-12-12T19:34:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=162
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:34:59,844 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio
2024-12-12T19:34:59,844 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate.
2024-12-12T19:34:59,844 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. because compaction request was cancelled
2024-12-12T19:34:59,844 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A
2024-12-12T19:34:59,844 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T19:34:59,855 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T19:34:59,855 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/C is initiating minor compaction (all files)
2024-12-12T19:34:59,856 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/C in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:34:59,856 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/3b65ee521e684c5caf909f1a4768384d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7527a602019f41b695160f0b80f1abe6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/93a35425e3ef41fbaf5dada52fe4d8c3] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=35.9 K
2024-12-12T19:34:59,863 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b65ee521e684c5caf909f1a4768384d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1734032095871
2024-12-12T19:34:59,869 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7527a602019f41b695160f0b80f1abe6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734032098155
2024-12-12T19:34:59,872 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 93a35425e3ef41fbaf5dada52fe4d8c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734032098317
2024-12-12T19:34:59,916 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#C#compaction#491 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T19:34:59,917 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/097bcb47c8194fefb435ba4de2b56846 is 50, key is test_row_0/C:col10/1734032099048/Put/seqid=0
2024-12-12T19:34:59,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742411_1587 (size=12561)
2024-12-12T19:34:59,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
2024-12-12T19:34:59,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:35:00,000 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C
2024-12-12T19:35:00,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:35:00,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121211f7fbdedc1f47b284d1156609b8310a_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032099102/Put/seqid=0
2024-12-12T19:35:00,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742412_1588 (size=12304)
2024-12-12T19:35:00,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:35:00,015 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121211f7fbdedc1f47b284d1156609b8310a_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121211f7fbdedc1f47b284d1156609b8310a_9e48e6f12fe9b1293921e76b13fdbb7f
2024-12-12T19:35:00,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/976e01c8b3f24b4a908c91621fb3b4fb, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f]
2024-12-12T19:35:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/976e01c8b3f24b4a908c91621fb3b4fb is 175, key is test_row_0/A:col10/1734032099102/Put/seqid=0
2024-12-12T19:35:00,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742413_1589 (size=31105)
2024-12-12T19:35:00,098 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/cc1ed44478f6442a9834018235fc0e36 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/cc1ed44478f6442a9834018235fc0e36
2024-12-12T19:35:00,101 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/A of 9e48e6f12fe9b1293921e76b13fdbb7f into cc1ed44478f6442a9834018235fc0e36(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 1sec to execute.
2024-12-12T19:35:00,101 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f:
2024-12-12T19:35:00,101 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/A, priority=13, startTime=1734032099044; duration=1sec
2024-12-12T19:35:00,101 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T19:35:00,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A
2024-12-12T19:35:00,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking
2024-12-12T19:35:00,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio
2024-12-12T19:35:00,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate.
2024-12-12T19:35:00,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. because compaction request was cancelled
2024-12-12T19:35:00,102 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B
2024-12-12T19:35:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f
2024-12-12T19:35:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing
2024-12-12T19:35:00,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161
2024-12-12T19:35:00,353 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/097bcb47c8194fefb435ba4de2b56846 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/097bcb47c8194fefb435ba4de2b56846
2024-12-12T19:35:00,364 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/C of 9e48e6f12fe9b1293921e76b13fdbb7f into 097bcb47c8194fefb435ba4de2b56846(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T19:35:00,364 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f:
2024-12-12T19:35:00,364 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/C, priority=13, startTime=1734032099843; duration=0sec
2024-12-12T19:35:00,364 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T19:35:00,364 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C
2024-12-12T19:35:00,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032160385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:00,427 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=205, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/976e01c8b3f24b4a908c91621fb3b4fb
2024-12-12T19:35:00,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/c452ec0293ba4bc383cf4db4603d10ba is 50, key is test_row_0/B:col10/1734032099102/Put/seqid=0
2024-12-12T19:35:00,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742414_1590 (size=12151)
2024-12-12T19:35:00,479 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/c452ec0293ba4bc383cf4db4603d10ba
2024-12-12T19:35:00,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032160489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:00,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/b5b3a1ca6c6d48e4a787a2770e7d8406 is 50, key is test_row_0/C:col10/1734032099102/Put/seqid=0
2024-12-12T19:35:00,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742415_1591 (size=12151)
2024-12-12T19:35:00,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:00,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032160698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:00,875 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-12T19:35:00,963 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/b5b3a1ca6c6d48e4a787a2770e7d8406
2024-12-12T19:35:00,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/976e01c8b3f24b4a908c91621fb3b4fb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb
2024-12-12T19:35:01,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032161003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:01,007 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb, entries=150, sequenceid=205, filesize=30.4 K
2024-12-12T19:35:01,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/c452ec0293ba4bc383cf4db4603d10ba as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c452ec0293ba4bc383cf4db4603d10ba
2024-12-12T19:35:01,020 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c452ec0293ba4bc383cf4db4603d10ba, entries=150, sequenceid=205, filesize=11.9 K
2024-12-12T19:35:01,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/b5b3a1ca6c6d48e4a787a2770e7d8406 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/b5b3a1ca6c6d48e4a787a2770e7d8406
2024-12-12T19:35:01,027 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/b5b3a1ca6c6d48e4a787a2770e7d8406, entries=150, sequenceid=205, filesize=11.9 K
2024-12-12T19:35:01,030 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1030ms, sequenceid=205, compaction requested=true
2024-12-12T19:35:01,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f:
2024-12-12T19:35:01,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:35:01,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162
2024-12-12T19:35:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=162
2024-12-12T19:35:01,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161
2024-12-12T19:35:01,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8090 sec
2024-12-12T19:35:01,044 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.8310 sec
2024-12-12T19:35:01,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-12-12T19:35:01,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A
2024-12-12T19:35:01,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:35:01,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B
2024-12-12T19:35:01,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:35:01,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C
2024-12-12T19:35:01,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T19:35:01,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f
2024-12-12T19:35:01,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121229a3cab843754b4cacb455e52a19af7b_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032101513/Put/seqid=0
2024-12-12T19:35:01,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:01,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032161564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:01,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742416_1592 (size=12304)
2024-12-12T19:35:01,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:01,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032161668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:01,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032161870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:02,006 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T19:35:02,016 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121229a3cab843754b4cacb455e52a19af7b_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121229a3cab843754b4cacb455e52a19af7b_9e48e6f12fe9b1293921e76b13fdbb7f
2024-12-12T19:35:02,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/e1ee304881de404b88eb4ecc083bb6de, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f]
2024-12-12T19:35:02,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/e1ee304881de404b88eb4ecc083bb6de is 175, key is test_row_0/A:col10/1734032101513/Put/seqid=0
2024-12-12T19:35:02,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742417_1593 (size=31105)
2024-12-12T19:35:02,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T19:35:02,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032162176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:02,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161
2024-12-12T19:35:02,330 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed
2024-12-12T19:35:02,341 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T19:35:02,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees
2024-12-12T19:35:02,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-12-12T19:35:02,352 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T19:35:02,355 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T19:35:02,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-12T19:35:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-12-12T19:35:02,463 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/e1ee304881de404b88eb4ecc083bb6de
2024-12-12T19:35:02,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/fffe1d32552a4863aa580afa0763fbe2 is 50, key is test_row_0/B:col10/1734032101513/Put/seqid=0
2024-12-12T19:35:02,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038
2024-12-12T19:35:02,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164
2024-12-12T19:35:02,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:35:02,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing
2024-12-12T19:35:02,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.
2024-12-12T19:35:02,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164
java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:35:02,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164
java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:35:02,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=164
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T19:35:02,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742418_1594 (size=12151) 2024-12-12T19:35:02,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T19:35:02,675 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:02,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T19:35:02,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:02,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:02,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:02,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:02,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032162682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:02,832 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:02,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T19:35:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:02,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/fffe1d32552a4863aa580afa0763fbe2 2024-12-12T19:35:02,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/38876bb80fc74038a38e09599fa29ce7 is 50, key is test_row_0/C:col10/1734032101513/Put/seqid=0 2024-12-12T19:35:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T19:35:02,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742419_1595 (size=12151) 2024-12-12T19:35:02,991 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:02,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T19:35:02,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:02,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:02,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:02,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:02,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/38876bb80fc74038a38e09599fa29ce7 2024-12-12T19:35:03,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/e1ee304881de404b88eb4ecc083bb6de as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de 2024-12-12T19:35:03,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de, entries=150, sequenceid=234, filesize=30.4 K 2024-12-12T19:35:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/fffe1d32552a4863aa580afa0763fbe2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/fffe1d32552a4863aa580afa0763fbe2 2024-12-12T19:35:03,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/fffe1d32552a4863aa580afa0763fbe2, entries=150, 
sequenceid=234, filesize=11.9 K 2024-12-12T19:35:03,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/38876bb80fc74038a38e09599fa29ce7 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/38876bb80fc74038a38e09599fa29ce7 2024-12-12T19:35:03,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/38876bb80fc74038a38e09599fa29ce7, entries=150, sequenceid=234, filesize=11.9 K 2024-12-12T19:35:03,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1575ms, sequenceid=234, compaction requested=true 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:35:03,096 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:35:03,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T19:35:03,097 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124796 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:35:03,097 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/A is initiating minor compaction (all files) 2024-12-12T19:35:03,097 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/A in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:03,097 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/cc1ed44478f6442a9834018235fc0e36, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=121.9 K 2024-12-12T19:35:03,097 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:03,097 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/cc1ed44478f6442a9834018235fc0e36, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de] 2024-12-12T19:35:03,098 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting cc1ed44478f6442a9834018235fc0e36, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734032098155 2024-12-12T19:35:03,099 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T19:35:03,103 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ef98435d8fbe4c4dbb306487d441268e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734032098317 2024-12-12T19:35:03,108 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 976e01c8b3f24b4a908c91621fb3b4fb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1734032099093 2024-12-12T19:35:03,108 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ee304881de404b88eb4ecc083bb6de, 
keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734032100367 2024-12-12T19:35:03,108 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T19:35:03,108 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/B is initiating minor compaction (all files) 2024-12-12T19:35:03,108 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/B in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:03,109 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bdbab00ebc8844638bed16475c3ba0da, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bb20be55cfa440e39cfe94bb42f66971, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c452ec0293ba4bc383cf4db4603d10ba, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/fffe1d32552a4863aa580afa0763fbe2] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=47.8 K 2024-12-12T19:35:03,115 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdbab00ebc8844638bed16475c3ba0da, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734032098155 2024-12-12T19:35:03,116 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb20be55cfa440e39cfe94bb42f66971, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734032098317 2024-12-12T19:35:03,119 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting c452ec0293ba4bc383cf4db4603d10ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1734032099093 2024-12-12T19:35:03,123 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting fffe1d32552a4863aa580afa0763fbe2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734032100367 2024-12-12T19:35:03,131 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:03,137 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#B#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:03,138 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/c0ecc19f163a4d54a4ad0bfd272da05b is 50, key is test_row_0/B:col10/1734032101513/Put/seqid=0 2024-12-12T19:35:03,139 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121278003deeb1bf4faa80805be4313e0ae1_9e48e6f12fe9b1293921e76b13fdbb7f store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:03,141 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121278003deeb1bf4faa80805be4313e0ae1_9e48e6f12fe9b1293921e76b13fdbb7f, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:03,142 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121278003deeb1bf4faa80805be4313e0ae1_9e48e6f12fe9b1293921e76b13fdbb7f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:03,149 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:03,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T19:35:03,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:03,152 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T19:35:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:35:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:35:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:35:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742420_1596 (size=12663) 2024-12-12T19:35:03,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a645902245124d9ea0f7bf3e147bc812_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032101551/Put/seqid=0 2024-12-12T19:35:03,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742421_1597 (size=4469) 2024-12-12T19:35:03,232 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#A#compaction#498 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:03,233 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/5aa8b2268c684c1abd5671913576e897 is 175, key is test_row_0/A:col10/1734032101513/Put/seqid=0 2024-12-12T19:35:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742422_1598 (size=12304) 2024-12-12T19:35:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742423_1599 (size=31617) 2024-12-12T19:35:03,320 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/5aa8b2268c684c1abd5671913576e897 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/5aa8b2268c684c1abd5671913576e897 2024-12-12T19:35:03,387 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/A of 9e48e6f12fe9b1293921e76b13fdbb7f into 5aa8b2268c684c1abd5671913576e897(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:35:03,387 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:03,387 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/A, priority=12, startTime=1734032103096; duration=0sec 2024-12-12T19:35:03,388 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:35:03,388 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A 2024-12-12T19:35:03,388 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:03,400 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:03,400 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/C is initiating minor compaction (all files) 2024-12-12T19:35:03,400 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/C in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:03,400 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/097bcb47c8194fefb435ba4de2b56846, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/b5b3a1ca6c6d48e4a787a2770e7d8406, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/38876bb80fc74038a38e09599fa29ce7] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=36.0 K 2024-12-12T19:35:03,404 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 097bcb47c8194fefb435ba4de2b56846, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734032098317 2024-12-12T19:35:03,405 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting b5b3a1ca6c6d48e4a787a2770e7d8406, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1734032099093 2024-12-12T19:35:03,407 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 38876bb80fc74038a38e09599fa29ce7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734032100367 2024-12-12T19:35:03,433 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#C#compaction#501 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:03,433 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ae6ab58c5f034c04b54f1bd62c32528a is 50, key is test_row_0/C:col10/1734032101513/Put/seqid=0 2024-12-12T19:35:03,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T19:35:03,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742424_1600 (size=12663) 2024-12-12T19:35:03,643 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/c0ecc19f163a4d54a4ad0bfd272da05b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c0ecc19f163a4d54a4ad0bfd272da05b 2024-12-12T19:35:03,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:35:03,702 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/B of 9e48e6f12fe9b1293921e76b13fdbb7f into c0ecc19f163a4d54a4ad0bfd272da05b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:35:03,702 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:03,702 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/B, priority=12, startTime=1734032103096; duration=0sec 2024-12-12T19:35:03,702 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:03,702 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B 2024-12-12T19:35:03,718 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a645902245124d9ea0f7bf3e147bc812_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a645902245124d9ea0f7bf3e147bc812_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:03,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/36ca8fec15264ecab15d0cd7f26bd21d, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:03,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/36ca8fec15264ecab15d0cd7f26bd21d is 175, key is test_row_0/A:col10/1734032101551/Put/seqid=0 2024-12-12T19:35:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:03,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:03,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742425_1601 (size=31105) 2024-12-12T19:35:03,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032163863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:03,920 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ae6ab58c5f034c04b54f1bd62c32528a as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ae6ab58c5f034c04b54f1bd62c32528a 2024-12-12T19:35:03,939 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/C of 9e48e6f12fe9b1293921e76b13fdbb7f into ae6ab58c5f034c04b54f1bd62c32528a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:35:03,939 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:03,939 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/C, priority=13, startTime=1734032103096; duration=0sec 2024-12-12T19:35:03,939 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:03,939 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C 2024-12-12T19:35:03,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:03,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032163975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,149 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/36ca8fec15264ecab15d0cd7f26bd21d 2024-12-12T19:35:04,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/4d919aa21d6849cbaea510d42db130ff is 50, key is test_row_0/B:col10/1734032101551/Put/seqid=0 2024-12-12T19:35:04,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:04,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032164187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742426_1602 (size=12151) 2024-12-12T19:35:04,208 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/4d919aa21d6849cbaea510d42db130ff 2024-12-12T19:35:04,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ba79f22132324c74bf6d24f6756572c8 is 50, key is test_row_0/C:col10/1734032101551/Put/seqid=0 2024-12-12T19:35:04,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742427_1603 (size=12151) 2024-12-12T19:35:04,300 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ba79f22132324c74bf6d24f6756572c8 2024-12-12T19:35:04,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/36ca8fec15264ecab15d0cd7f26bd21d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d 2024-12-12T19:35:04,356 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d, entries=150, sequenceid=243, filesize=30.4 K 2024-12-12T19:35:04,357 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/4d919aa21d6849cbaea510d42db130ff as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/4d919aa21d6849cbaea510d42db130ff 2024-12-12T19:35:04,380 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/4d919aa21d6849cbaea510d42db130ff, entries=150, sequenceid=243, filesize=11.9 K 2024-12-12T19:35:04,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ba79f22132324c74bf6d24f6756572c8 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ba79f22132324c74bf6d24f6756572c8 2024-12-12T19:35:04,420 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ba79f22132324c74bf6d24f6756572c8, entries=150, sequenceid=243, filesize=11.9 K 2024-12-12T19:35:04,427 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1275ms, sequenceid=243, compaction requested=false 2024-12-12T19:35:04,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:04,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
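
[Editor's note] The flush records above commit one HFile per column family (A, B and C) for cells keyed like test_row_0/A:col10, which reach the region through RSRpcServices.mutate -> HRegion.put. As a rough illustration only (not the test's actual code; the table and family names are taken from the log), a client write that would produce such cells looks like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // One row mutation touching all three families, mirroring the
          // flushed keys seen above (test_row_0/A:col10, /B:col10, /C:col10).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] qualifier = Bytes.toBytes("col10");
          byte[] value = Bytes.toBytes("value");
          put.addColumn(Bytes.toBytes("A"), qualifier, value);
          put.addColumn(Bytes.toBytes("B"), qualifier, value);
          put.addColumn(Bytes.toBytes("C"), qualifier, value);
          table.put(put);  // arrives server-side via RSRpcServices.mutate -> HRegion.put
        }
      }
    }
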
2024-12-12T19:35:04,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-12T19:35:04,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-12T19:35:04,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-12T19:35:04,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0790 sec 2024-12-12T19:35:04,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.1090 sec 2024-12-12T19:35:04,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T19:35:04,456 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-12T19:35:04,470 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T19:35:04,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-12T19:35:04,480 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T19:35:04,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:04,481 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T19:35:04,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T19:35:04,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T19:35:04,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:35:04,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:04,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:35:04,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:04,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 
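
[Editor's note] The master-side records above (FlushTableProcedure pid=163 finishing, the client seeing "Operation: FLUSH ... procId: 163 completed", then a new flush request creating pid=165) correspond to a client-side Admin.flush call. A minimal sketch, assuming default configuration and the table name from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master and waits until the
          // procedure completes (the "procId ... completed" line above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
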
2024-12-12T19:35:04,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:04,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:04,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:04,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032164552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212744ce8edf17545e2b64b578bfdae8170_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:04,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:04,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742428_1604 (size=12454) 2024-12-12T19:35:04,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T19:35:04,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:04,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:04,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:04,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:04,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
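
[Editor's note] The "Mob store is flushed" and mobdir/.tmp records earlier in this flush show that family A of this table is MOB-enabled, so its larger cells are flushed through DefaultMobStoreFlusher into MOB files. A hedged sketch of how such a schema could be declared; the threshold value is made up for illustration, since the test's actual schema is not shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // MOB-enabled family A: flushes of its big cells go through
          // DefaultMobStoreFlusher and land under /mobdir, as in the log.
          // The 100-byte threshold is a hypothetical value.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L)
              .build());
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }
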
2024-12-12T19:35:04,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:04,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:04,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032164659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:04,801 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T19:35:04,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:04,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:04,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:04,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
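
[Editor's note] Each WARN/stack pair above is the region server rejecting a Mutate RPC in HRegion.checkResources because the region's memstore is over its blocking limit (512.0 K in this run). RegionTooBusyException is a retryable IOException: the stock client retries it internally and may eventually surface it wrapped in RetriesExhaustedWithDetailsException, but an explicit back-off loop makes the expected behaviour visible. A sketch only, with a hypothetical helper name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRetrySketch {
      // Illustrative helper, not part of HBase: retries a put while the
      // region reports "Over memstore limit", backing off between attempts
      // so a flush can drain the memstore.
      static void putWithBackoff(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e;  // give up: region still over its memstore blocking limit
            }
            Thread.sleep(100L * attempt);  // wait before retrying
          }
        }
      }
    }
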
2024-12-12T19:35:04,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:04,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:04,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:04,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032164867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:04,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T19:35:04,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:04,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:04,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:04,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:04,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:05,028 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:35:05,037 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212744ce8edf17545e2b64b578bfdae8170_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212744ce8edf17545e2b64b578bfdae8170_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:05,038 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/17af146ca6c64191bf8eff75d98db29c, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:05,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/17af146ca6c64191bf8eff75d98db29c is 175, key is test_row_0/A:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:05,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742429_1605 (size=31255) 2024-12-12T19:35:05,062 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=55.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/17af146ca6c64191bf8eff75d98db29c 2024-12-12T19:35:05,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/60a7006ced714b8d9e269fd09e3c2afb is 50, key is test_row_0/B:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:05,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742430_1606 (size=12301) 2024-12-12T19:35:05,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/60a7006ced714b8d9e269fd09e3c2afb 2024-12-12T19:35:05,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:05,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T19:35:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:05,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:35:05,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:05,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:05,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032165176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:05,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/7d233d03154149de8eb3131a83ec0faa is 50, key is test_row_0/C:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:05,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742431_1607 (size=12301) 2024-12-12T19:35:05,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/7d233d03154149de8eb3131a83ec0faa 2024-12-12T19:35:05,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/17af146ca6c64191bf8eff75d98db29c as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c 2024-12-12T19:35:05,295 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:05,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T19:35:05,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:05,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:05,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:05,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T19:35:05,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T19:35:05,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
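
[Editor's note] The RegionTooBusyException records keep repeating because writes keep arriving while the memstore stays above its blocking threshold. That threshold is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 512 K limit in this run points to a deliberately small test configuration that forces frequent flushes. A sketch of the relevant keys, with illustrative values (the production defaults are roughly 128 MB and 4), not the values actually used by this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit ~= flush size * block multiplier; writes are
        // rejected with RegionTooBusyException once a region exceeds it.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);
        System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
      }
    }
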
2024-12-12T19:35:05,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c, entries=150, sequenceid=274, filesize=30.5 K 2024-12-12T19:35:05,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/60a7006ced714b8d9e269fd09e3c2afb as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/60a7006ced714b8d9e269fd09e3c2afb 2024-12-12T19:35:05,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/60a7006ced714b8d9e269fd09e3c2afb, entries=150, sequenceid=274, filesize=12.0 K 2024-12-12T19:35:05,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/7d233d03154149de8eb3131a83ec0faa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7d233d03154149de8eb3131a83ec0faa 2024-12-12T19:35:05,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7d233d03154149de8eb3131a83ec0faa, entries=150, sequenceid=274, filesize=12.0 K 2024-12-12T19:35:05,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 9e48e6f12fe9b1293921e76b13fdbb7f in 922ms, sequenceid=274, compaction requested=true 2024-12-12T19:35:05,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:05,431 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:05,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:35:05,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:05,434 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:05,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:35:05,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:05,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:35:05,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:35:05,451 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:05,451 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93977 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:05,451 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/B is initiating minor compaction (all files) 2024-12-12T19:35:05,451 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/A is initiating minor compaction (all files) 2024-12-12T19:35:05,451 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/B in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:05,451 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c0ecc19f163a4d54a4ad0bfd272da05b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/4d919aa21d6849cbaea510d42db130ff, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/60a7006ced714b8d9e269fd09e3c2afb] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=36.2 K 2024-12-12T19:35:05,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:05,455 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/A in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:05,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42689 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T19:35:05,455 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/5aa8b2268c684c1abd5671913576e897, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=91.8 K 2024-12-12T19:35:05,455 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:05,455 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/5aa8b2268c684c1abd5671913576e897, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c] 2024-12-12T19:35:05,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:05,456 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T19:35:05,456 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c0ecc19f163a4d54a4ad0bfd272da05b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734032100367 2024-12-12T19:35:05,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:35:05,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:05,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:35:05,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:05,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:35:05,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:05,467 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d919aa21d6849cbaea510d42db130ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1734032101543 2024-12-12T19:35:05,468 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5aa8b2268c684c1abd5671913576e897, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734032100367 2024-12-12T19:35:05,472 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 60a7006ced714b8d9e269fd09e3c2afb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734032103836 2024-12-12T19:35:05,473 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36ca8fec15264ecab15d0cd7f26bd21d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1734032101543 2024-12-12T19:35:05,474 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17af146ca6c64191bf8eff75d98db29c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734032103836 2024-12-12T19:35:05,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125743345fec7f403481b48e9018431bbe_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is 
test_row_0/A:col10/1734032104532/Put/seqid=0 2024-12-12T19:35:05,500 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#B#compaction#508 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:05,500 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/121c90ca624641209e0d371d9c6febf6 is 50, key is test_row_0/B:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:05,504 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:05,524 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212c27e428be3574f80a5cb8a3d2e5f4857_9e48e6f12fe9b1293921e76b13fdbb7f store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:05,526 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212c27e428be3574f80a5cb8a3d2e5f4857_9e48e6f12fe9b1293921e76b13fdbb7f, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:05,526 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c27e428be3574f80a5cb8a3d2e5f4857_9e48e6f12fe9b1293921e76b13fdbb7f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742433_1609 (size=12915) 2024-12-12T19:35:05,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742432_1608 (size=12454) 2024-12-12T19:35:05,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742434_1610 (size=4469) 2024-12-12T19:35:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:05,596 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#A#compaction#509 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:05,597 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/04e9c85780284aa9a741e6f42e81b889 is 175, key is test_row_0/A:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:05,615 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/121c90ca624641209e0d371d9c6febf6 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/121c90ca624641209e0d371d9c6febf6 2024-12-12T19:35:05,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742435_1611 (size=31869) 2024-12-12T19:35:05,657 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/B of 9e48e6f12fe9b1293921e76b13fdbb7f into 121c90ca624641209e0d371d9c6febf6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:35:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:05,657 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/B, priority=13, startTime=1734032105434; duration=0sec 2024-12-12T19:35:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:35:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B 2024-12-12T19:35:05,657 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:05,664 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:05,664 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/C is initiating minor compaction (all files) 2024-12-12T19:35:05,664 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/C in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:05,664 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ae6ab58c5f034c04b54f1bd62c32528a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ba79f22132324c74bf6d24f6756572c8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7d233d03154149de8eb3131a83ec0faa] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=36.2 K 2024-12-12T19:35:05,665 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ae6ab58c5f034c04b54f1bd62c32528a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734032100367 2024-12-12T19:35:05,668 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting ba79f22132324c74bf6d24f6756572c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1734032101543 2024-12-12T19:35:05,668 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d233d03154149de8eb3131a83ec0faa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734032103836 2024-12-12T19:35:05,670 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/04e9c85780284aa9a741e6f42e81b889 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/04e9c85780284aa9a741e6f42e81b889 2024-12-12T19:35:05,691 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/A of 9e48e6f12fe9b1293921e76b13fdbb7f into 04e9c85780284aa9a741e6f42e81b889(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:35:05,691 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:05,691 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/A, priority=13, startTime=1734032105431; duration=0sec 2024-12-12T19:35:05,692 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:05,692 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A 2024-12-12T19:35:05,693 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#C#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:05,694 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/c7802ade8fc544ea9ee7d1002a4a3e12 is 50, key is test_row_0/C:col10/1734032104496/Put/seqid=0 2024-12-12T19:35:05,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. as already flushing 2024-12-12T19:35:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:05,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742436_1612 (size=12915) 2024-12-12T19:35:05,764 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/c7802ade8fc544ea9ee7d1002a4a3e12 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/c7802ade8fc544ea9ee7d1002a4a3e12 2024-12-12T19:35:05,778 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/C of 9e48e6f12fe9b1293921e76b13fdbb7f into c7802ade8fc544ea9ee7d1002a4a3e12(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:35:05,778 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:05,778 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/C, priority=13, startTime=1734032105437; duration=0sec 2024-12-12T19:35:05,778 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:05,778 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C 2024-12-12T19:35:05,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032165880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:05,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:35:05,974 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125743345fec7f403481b48e9018431bbe_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125743345fec7f403481b48e9018431bbe_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:05,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d280250c812641b5866e8f057820c864, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:05,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d280250c812641b5866e8f057820c864 is 175, key is test_row_0/A:col10/1734032104532/Put/seqid=0 2024-12-12T19:35:05,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:05,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032165990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:05,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742437_1613 (size=31255) 2024-12-12T19:35:06,002 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d280250c812641b5866e8f057820c864 2024-12-12T19:35:06,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/2ab9ea7f241b4668b5a8e3de70b031b3 is 50, key is test_row_0/B:col10/1734032104532/Put/seqid=0 2024-12-12T19:35:06,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742438_1614 (size=12301) 2024-12-12T19:35:06,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:06,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032166200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:06,299 DEBUG [Thread-2395 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15b6349f to 127.0.0.1:52216 2024-12-12T19:35:06,300 DEBUG [Thread-2395 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:06,300 DEBUG [Thread-2393 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d832d43 to 127.0.0.1:52216 2024-12-12T19:35:06,300 DEBUG [Thread-2393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:06,300 DEBUG [Thread-2401 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7287c75d to 127.0.0.1:52216 2024-12-12T19:35:06,300 DEBUG [Thread-2401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:06,302 DEBUG [Thread-2397 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x439b60d5 to 127.0.0.1:52216 2024-12-12T19:35:06,302 DEBUG [Thread-2397 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:06,305 DEBUG [Thread-2399 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f48b1c2 to 127.0.0.1:52216 2024-12-12T19:35:06,306 DEBUG [Thread-2399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:06,468 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/2ab9ea7f241b4668b5a8e3de70b031b3 2024-12-12T19:35:06,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/09d2efe0000343978f74adba5f75f567 is 50, key is test_row_0/C:col10/1734032104532/Put/seqid=0 2024-12-12T19:35:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742439_1615 (size=12301) 2024-12-12T19:35:06,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T19:35:06,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49080 deadline: 1734032166512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:06,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:06,900 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/09d2efe0000343978f74adba5f75f567 2024-12-12T19:35:06,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/d280250c812641b5866e8f057820c864 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864 2024-12-12T19:35:06,909 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864, entries=150, sequenceid=282, filesize=30.5 K 2024-12-12T19:35:06,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/2ab9ea7f241b4668b5a8e3de70b031b3 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/2ab9ea7f241b4668b5a8e3de70b031b3 2024-12-12T19:35:06,914 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/2ab9ea7f241b4668b5a8e3de70b031b3, entries=150, sequenceid=282, filesize=12.0 K 2024-12-12T19:35:06,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/09d2efe0000343978f74adba5f75f567 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/09d2efe0000343978f74adba5f75f567 2024-12-12T19:35:06,919 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/09d2efe0000343978f74adba5f75f567, entries=150, sequenceid=282, filesize=12.0 K 2024-12-12T19:35:06,920 INFO [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1463ms, sequenceid=282, compaction requested=false 2024-12-12T19:35:06,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:06,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:06,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4c9c438b6eeb:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-12T19:35:06,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-12T19:35:06,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-12T19:35:06,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4400 sec 2024-12-12T19:35:06,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.4520 sec 2024-12-12T19:35:07,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42689 {}] regionserver.HRegion(8581): Flush requested on 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:07,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-12T19:35:07,017 DEBUG [Thread-2382 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x091d72db to 127.0.0.1:52216 2024-12-12T19:35:07,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:35:07,017 DEBUG [Thread-2382 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:07,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:07,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:35:07,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:07,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:35:07,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:07,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c25d34d95651458380d4d7a177db8ed1_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:07,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742440_1616 (size=12454) 2024-12-12T19:35:07,060 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:35:07,103 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c25d34d95651458380d4d7a177db8ed1_9e48e6f12fe9b1293921e76b13fdbb7f to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c25d34d95651458380d4d7a177db8ed1_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:07,114 DEBUG [Thread-2388 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:52216 2024-12-12T19:35:07,114 DEBUG [Thread-2388 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:07,116 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2f4c8406a4df4b4f8299ab01fc8b1d6d, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:07,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2f4c8406a4df4b4f8299ab01fc8b1d6d is 175, key is test_row_0/A:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:07,131 DEBUG [Thread-2386 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:52216 2024-12-12T19:35:07,131 DEBUG [Thread-2386 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:07,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742441_1617 (size=31255) 2024-12-12T19:35:07,147 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2f4c8406a4df4b4f8299ab01fc8b1d6d 2024-12-12T19:35:07,156 DEBUG [Thread-2390 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:52216 2024-12-12T19:35:07,156 DEBUG [Thread-2390 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:07,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/1a9ad0c8a02a45e68fa2f1a74c4e321d is 50, key is test_row_0/B:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:07,200 DEBUG [Thread-2384 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:52216 2024-12-12T19:35:07,200 DEBUG [Thread-2384 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742442_1618 (size=12301) 2024-12-12T19:35:07,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/1a9ad0c8a02a45e68fa2f1a74c4e321d 2024-12-12T19:35:07,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4 is 50, key is test_row_0/C:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:07,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742443_1619 (size=12301) 2024-12-12T19:35:08,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4 2024-12-12T19:35:08,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/2f4c8406a4df4b4f8299ab01fc8b1d6d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d 2024-12-12T19:35:08,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d, entries=150, sequenceid=314, filesize=30.5 K 2024-12-12T19:35:08,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/1a9ad0c8a02a45e68fa2f1a74c4e321d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1a9ad0c8a02a45e68fa2f1a74c4e321d 2024-12-12T19:35:08,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1a9ad0c8a02a45e68fa2f1a74c4e321d, entries=150, sequenceid=314, filesize=12.0 K 2024-12-12T19:35:08,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4 2024-12-12T19:35:08,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4, entries=150, sequenceid=314, filesize=12.0 K 2024-12-12T19:35:08,091 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 9e48e6f12fe9b1293921e76b13fdbb7f in 1074ms, sequenceid=314, compaction requested=true 2024-12-12T19:35:08,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:08,091 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T19:35:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T19:35:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:08,092 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9e48e6f12fe9b1293921e76b13fdbb7f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T19:35:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:35:08,092 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:08,092 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/A is initiating minor compaction (all files) 2024-12-12T19:35:08,092 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/A in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:08,093 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/04e9c85780284aa9a741e6f42e81b889, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=92.2 K 2024-12-12T19:35:08,093 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:08,093 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. files: [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/04e9c85780284aa9a741e6f42e81b889, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d] 2024-12-12T19:35:08,093 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:08,093 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04e9c85780284aa9a741e6f42e81b889, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734032103836 2024-12-12T19:35:08,093 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/B is initiating minor compaction (all files) 2024-12-12T19:35:08,093 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/B in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:08,093 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/121c90ca624641209e0d371d9c6febf6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/2ab9ea7f241b4668b5a8e3de70b031b3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1a9ad0c8a02a45e68fa2f1a74c4e321d] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=36.6 K 2024-12-12T19:35:08,093 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting d280250c812641b5866e8f057820c864, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734032104532 2024-12-12T19:35:08,094 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 121c90ca624641209e0d371d9c6febf6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734032103836 2024-12-12T19:35:08,094 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f4c8406a4df4b4f8299ab01fc8b1d6d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734032105871 2024-12-12T19:35:08,094 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ab9ea7f241b4668b5a8e3de70b031b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734032104532 2024-12-12T19:35:08,094 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a9ad0c8a02a45e68fa2f1a74c4e321d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734032105871 2024-12-12T19:35:08,101 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#B#compaction#516 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:08,102 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/22545d56780741769023996cb5ddc3ef is 50, key is test_row_0/B:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:08,104 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:08,116 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121274990e786201459a87e3c6d41c3ea775_9e48e6f12fe9b1293921e76b13fdbb7f store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:08,118 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121274990e786201459a87e3c6d41c3ea775_9e48e6f12fe9b1293921e76b13fdbb7f, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:08,118 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121274990e786201459a87e3c6d41c3ea775_9e48e6f12fe9b1293921e76b13fdbb7f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:08,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742444_1620 (size=13017) 2024-12-12T19:35:08,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742445_1621 (size=4469) 2024-12-12T19:35:08,199 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/22545d56780741769023996cb5ddc3ef as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/22545d56780741769023996cb5ddc3ef 2024-12-12T19:35:08,235 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/B of 9e48e6f12fe9b1293921e76b13fdbb7f into 22545d56780741769023996cb5ddc3ef(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T19:35:08,235 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:08,236 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/B, priority=13, startTime=1734032108092; duration=0sec 2024-12-12T19:35:08,236 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T19:35:08,236 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:B 2024-12-12T19:35:08,236 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T19:35:08,240 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T19:35:08,240 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1540): 9e48e6f12fe9b1293921e76b13fdbb7f/C is initiating minor compaction (all files) 2024-12-12T19:35:08,240 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9e48e6f12fe9b1293921e76b13fdbb7f/C in TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:08,240 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/c7802ade8fc544ea9ee7d1002a4a3e12, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/09d2efe0000343978f74adba5f75f567, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4] into tmpdir=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp, totalSize=36.6 K 2024-12-12T19:35:08,241 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting c7802ade8fc544ea9ee7d1002a4a3e12, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734032103836 2024-12-12T19:35:08,243 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 09d2efe0000343978f74adba5f75f567, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734032104532 2024-12-12T19:35:08,243 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b1a4cabaf164fb6ab7d5398bc4bcfa4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734032105871 2024-12-12T19:35:08,272 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
9e48e6f12fe9b1293921e76b13fdbb7f#C#compaction#518 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:08,273 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/d02595ec26d2451185d3ad006f8033ee is 50, key is test_row_0/C:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:08,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742446_1622 (size=13017) 2024-12-12T19:35:08,580 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9e48e6f12fe9b1293921e76b13fdbb7f#A#compaction#517 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T19:35:08,581 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/8a2b9321b58d4f7a815c7b82a089e497 is 175, key is test_row_0/A:col10/1734032105875/Put/seqid=0 2024-12-12T19:35:08,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742447_1623 (size=31971) 2024-12-12T19:35:08,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T19:35:08,595 INFO [Thread-2392 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 192 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3643 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3629 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3829 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3548 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3807 2024-12-12T19:35:08,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T19:35:08,595 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:35:08,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:52216 2024-12-12T19:35:08,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:08,598 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T19:35:08,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T19:35:08,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:08,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T19:35:08,601 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032108601"}]},"ts":"1734032108601"} 2024-12-12T19:35:08,602 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T19:35:08,635 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T19:35:08,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T19:35:08,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, UNASSIGN}] 2024-12-12T19:35:08,637 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, UNASSIGN 2024-12-12T19:35:08,637 INFO [PEWorker-2 
{}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=CLOSING, regionLocation=4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:08,638 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T19:35:08,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; CloseRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038}] 2024-12-12T19:35:08,687 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/d02595ec26d2451185d3ad006f8033ee as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d02595ec26d2451185d3ad006f8033ee 2024-12-12T19:35:08,692 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/C of 9e48e6f12fe9b1293921e76b13fdbb7f into d02595ec26d2451185d3ad006f8033ee(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:35:08,692 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:08,693 INFO [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/C, priority=13, startTime=1734032108092; duration=0sec 2024-12-12T19:35:08,693 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:08,693 DEBUG [RS:0;4c9c438b6eeb:42689-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:C 2024-12-12T19:35:08,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T19:35:08,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:08,790 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(124): Close 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:08,790 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T19:35:08,790 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1681): Closing 9e48e6f12fe9b1293921e76b13fdbb7f, disabling compactions & flushes 2024-12-12T19:35:08,790 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:08,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T19:35:08,993 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/8a2b9321b58d4f7a815c7b82a089e497 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/8a2b9321b58d4f7a815c7b82a089e497 2024-12-12T19:35:08,997 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9e48e6f12fe9b1293921e76b13fdbb7f/A of 9e48e6f12fe9b1293921e76b13fdbb7f into 8a2b9321b58d4f7a815c7b82a089e497(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T19:35:08,998 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:08,998 INFO [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f., storeName=9e48e6f12fe9b1293921e76b13fdbb7f/A, priority=13, startTime=1734032108091; duration=0sec 2024-12-12T19:35:08,998 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 2024-12-12T19:35:08,998 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. after waiting 0 ms 2024-12-12T19:35:08,998 DEBUG [RS:0;4c9c438b6eeb:42689-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9e48e6f12fe9b1293921e76b13fdbb7f:A 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:08,998 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(2837): Flushing 9e48e6f12fe9b1293921e76b13fdbb7f 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=A 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=B 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9e48e6f12fe9b1293921e76b13fdbb7f, store=C 2024-12-12T19:35:08,998 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T19:35:09,005 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212abd8264b2579494884095af6f54e7769_9e48e6f12fe9b1293921e76b13fdbb7f is 50, key is test_row_0/A:col10/1734032107151/Put/seqid=0 2024-12-12T19:35:09,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742448_1624 (size=9914) 2024-12-12T19:35:09,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T19:35:09,424 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T19:35:09,427 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212abd8264b2579494884095af6f54e7769_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212abd8264b2579494884095af6f54e7769_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:09,428 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/39329e3dc5434b8889e82ed6d7fec875, store: [table=TestAcidGuarantees family=A region=9e48e6f12fe9b1293921e76b13fdbb7f] 2024-12-12T19:35:09,428 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/39329e3dc5434b8889e82ed6d7fec875 is 175, key is test_row_0/A:col10/1734032107151/Put/seqid=0 2024-12-12T19:35:09,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742449_1625 (size=22561) 2024-12-12T19:35:09,442 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/39329e3dc5434b8889e82ed6d7fec875 2024-12-12T19:35:09,451 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/53fd867ae70642348796e8a16bebf3b2 is 50, key is test_row_0/B:col10/1734032107151/Put/seqid=0 2024-12-12T19:35:09,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742450_1626 (size=9857) 2024-12-12T19:35:09,492 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/53fd867ae70642348796e8a16bebf3b2 2024-12-12T19:35:09,503 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ff6d9065a7864966ba84bae175f76236 is 50, key is test_row_0/C:col10/1734032107151/Put/seqid=0 2024-12-12T19:35:09,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742451_1627 (size=9857) 2024-12-12T19:35:09,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T19:35:09,936 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ff6d9065a7864966ba84bae175f76236 2024-12-12T19:35:09,940 DEBUG 
[RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/A/39329e3dc5434b8889e82ed6d7fec875 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/39329e3dc5434b8889e82ed6d7fec875 2024-12-12T19:35:09,944 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/39329e3dc5434b8889e82ed6d7fec875, entries=100, sequenceid=324, filesize=22.0 K 2024-12-12T19:35:09,945 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/B/53fd867ae70642348796e8a16bebf3b2 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/53fd867ae70642348796e8a16bebf3b2 2024-12-12T19:35:09,948 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/53fd867ae70642348796e8a16bebf3b2, entries=100, sequenceid=324, filesize=9.6 K 2024-12-12T19:35:09,949 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/.tmp/C/ff6d9065a7864966ba84bae175f76236 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ff6d9065a7864966ba84bae175f76236 2024-12-12T19:35:09,953 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ff6d9065a7864966ba84bae175f76236, entries=100, sequenceid=324, filesize=9.6 K 2024-12-12T19:35:09,954 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 9e48e6f12fe9b1293921e76b13fdbb7f in 955ms, sequenceid=324, compaction requested=false 2024-12-12T19:35:09,954 DEBUG [StoreCloser-TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9f3511a629b940379864f06b7407d2f3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e69a72d8a3cc4c47abafafb2dafc7a2f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/cc1ed44478f6442a9834018235fc0e36, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/5aa8b2268c684c1abd5671913576e897, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/04e9c85780284aa9a741e6f42e81b889, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d] to archive 2024-12-12T19:35:09,955 DEBUG [StoreCloser-TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:35:09,957 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9f3511a629b940379864f06b7407d2f3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9f3511a629b940379864f06b7407d2f3 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/6be0701625234af6938b6fdc61a32911 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/dee303a8daee426a9e47cb1f4171ea9c 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/9780a8dd51f546b9b6b8795c5fe18e24 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d8bcdb455a8f4d4184b4295ba7763f40 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/4bf9d5c8a6d14cbfb41d117ded95da34 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/295b57c10dbf46f49986795cee95078c 2024-12-12T19:35:09,958 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2af186e57912456e88707058631d4a5b 2024-12-12T19:35:09,960 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/ef98435d8fbe4c4dbb306487d441268e 2024-12-12T19:35:09,960 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/418abfb6b1d8438d9746addc0e11ab4d 2024-12-12T19:35:09,961 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/cc1ed44478f6442a9834018235fc0e36 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/cc1ed44478f6442a9834018235fc0e36 2024-12-12T19:35:09,961 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/5aa8b2268c684c1abd5671913576e897 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/5aa8b2268c684c1abd5671913576e897 2024-12-12T19:35:09,961 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e1ee304881de404b88eb4ecc083bb6de 2024-12-12T19:35:09,961 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/976e01c8b3f24b4a908c91621fb3b4fb 2024-12-12T19:35:09,961 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e69a72d8a3cc4c47abafafb2dafc7a2f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/e69a72d8a3cc4c47abafafb2dafc7a2f 2024-12-12T19:35:09,962 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/3c6ec2374bb44018801d737f671c6899 2024-12-12T19:35:09,969 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/36ca8fec15264ecab15d0cd7f26bd21d 2024-12-12T19:35:09,969 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/04e9c85780284aa9a741e6f42e81b889 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/04e9c85780284aa9a741e6f42e81b889 2024-12-12T19:35:09,969 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/17af146ca6c64191bf8eff75d98db29c 2024-12-12T19:35:09,970 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/d280250c812641b5866e8f057820c864 2024-12-12T19:35:09,975 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/2f4c8406a4df4b4f8299ab01fc8b1d6d 2024-12-12T19:35:09,981 DEBUG [StoreCloser-TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/cedab7db78d240bba776607200be2f21, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/f364a43ae5ce4f73a5b11594bd110b6f, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/18dc33f0d7ce4a6899fe7a4492681ccf, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/a692613c1db74f29b08fe8695a19d986, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/03c6edbf1eec44d491e571ba621acd19, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1697b2d886524decae15e22fff838dda, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/8fa05eebfa5742bea58ceed677b7f376, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/235b09b7413c48ce982c38238d24c91b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/d9ba14436c214541898393449b8134fe, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/eba9d637348240a59597119a113b9108, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bdbab00ebc8844638bed16475c3ba0da, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/b8a2a251cd9346e9bff6ce22ed69812d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bb20be55cfa440e39cfe94bb42f66971, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c452ec0293ba4bc383cf4db4603d10ba, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c0ecc19f163a4d54a4ad0bfd272da05b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/fffe1d32552a4863aa580afa0763fbe2, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/4d919aa21d6849cbaea510d42db130ff, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/121c90ca624641209e0d371d9c6febf6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/60a7006ced714b8d9e269fd09e3c2afb, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/2ab9ea7f241b4668b5a8e3de70b031b3, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1a9ad0c8a02a45e68fa2f1a74c4e321d] to archive 2024-12-12T19:35:09,982 DEBUG [StoreCloser-TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:35:09,997 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/cedab7db78d240bba776607200be2f21 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/cedab7db78d240bba776607200be2f21 2024-12-12T19:35:09,997 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/8fa05eebfa5742bea58ceed677b7f376 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/8fa05eebfa5742bea58ceed677b7f376 2024-12-12T19:35:09,997 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/03c6edbf1eec44d491e571ba621acd19 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/03c6edbf1eec44d491e571ba621acd19 2024-12-12T19:35:09,997 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1697b2d886524decae15e22fff838dda to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1697b2d886524decae15e22fff838dda 2024-12-12T19:35:09,998 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/235b09b7413c48ce982c38238d24c91b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/235b09b7413c48ce982c38238d24c91b 
2024-12-12T19:35:09,998 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/f364a43ae5ce4f73a5b11594bd110b6f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/f364a43ae5ce4f73a5b11594bd110b6f 2024-12-12T19:35:09,998 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/18dc33f0d7ce4a6899fe7a4492681ccf to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/18dc33f0d7ce4a6899fe7a4492681ccf 2024-12-12T19:35:09,998 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/a692613c1db74f29b08fe8695a19d986 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/a692613c1db74f29b08fe8695a19d986 2024-12-12T19:35:10,001 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/eba9d637348240a59597119a113b9108 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/eba9d637348240a59597119a113b9108 2024-12-12T19:35:10,001 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bdbab00ebc8844638bed16475c3ba0da to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bdbab00ebc8844638bed16475c3ba0da 2024-12-12T19:35:10,001 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/b8a2a251cd9346e9bff6ce22ed69812d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/b8a2a251cd9346e9bff6ce22ed69812d 2024-12-12T19:35:10,005 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/d9ba14436c214541898393449b8134fe to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/d9ba14436c214541898393449b8134fe 2024-12-12T19:35:10,005 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bb20be55cfa440e39cfe94bb42f66971 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/bb20be55cfa440e39cfe94bb42f66971 2024-12-12T19:35:10,005 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c0ecc19f163a4d54a4ad0bfd272da05b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c0ecc19f163a4d54a4ad0bfd272da05b 2024-12-12T19:35:10,005 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/fffe1d32552a4863aa580afa0763fbe2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/fffe1d32552a4863aa580afa0763fbe2 2024-12-12T19:35:10,006 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c452ec0293ba4bc383cf4db4603d10ba to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/c452ec0293ba4bc383cf4db4603d10ba 2024-12-12T19:35:10,007 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/121c90ca624641209e0d371d9c6febf6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/121c90ca624641209e0d371d9c6febf6 2024-12-12T19:35:10,007 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/4d919aa21d6849cbaea510d42db130ff to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/4d919aa21d6849cbaea510d42db130ff 2024-12-12T19:35:10,007 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/60a7006ced714b8d9e269fd09e3c2afb to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/60a7006ced714b8d9e269fd09e3c2afb 2024-12-12T19:35:10,008 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1a9ad0c8a02a45e68fa2f1a74c4e321d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/1a9ad0c8a02a45e68fa2f1a74c4e321d 2024-12-12T19:35:10,008 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/2ab9ea7f241b4668b5a8e3de70b031b3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/2ab9ea7f241b4668b5a8e3de70b031b3 2024-12-12T19:35:10,027 DEBUG [StoreCloser-TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/065ae5e8b62142af999bc03c1d35b7f5, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/e6f738f6d1c34fa38ca2df27b68a692b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/500cf24034604fa9ad968f642f47ac9d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d7ecc28e444e4be28dd0d0f827bf547b, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/740174a23489423bb0f9acadd35e2cca, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/542f862409914b109b56a0e49c0813cc, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/f410c456c1864c75a79630c06e37b155, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/15021bee9ead454591e3bc7d0e44c057, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/3b65ee521e684c5caf909f1a4768384d, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/686c4fa008ab43849f238bf4b6bcf305, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7527a602019f41b695160f0b80f1abe6, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/097bcb47c8194fefb435ba4de2b56846, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/93a35425e3ef41fbaf5dada52fe4d8c3, 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/b5b3a1ca6c6d48e4a787a2770e7d8406, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ae6ab58c5f034c04b54f1bd62c32528a, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/38876bb80fc74038a38e09599fa29ce7, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ba79f22132324c74bf6d24f6756572c8, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/c7802ade8fc544ea9ee7d1002a4a3e12, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7d233d03154149de8eb3131a83ec0faa, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/09d2efe0000343978f74adba5f75f567, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4] to archive 2024-12-12T19:35:10,039 DEBUG [StoreCloser-TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T19:35:10,044 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/740174a23489423bb0f9acadd35e2cca to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/740174a23489423bb0f9acadd35e2cca 2024-12-12T19:35:10,044 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d7ecc28e444e4be28dd0d0f827bf547b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d7ecc28e444e4be28dd0d0f827bf547b 2024-12-12T19:35:10,045 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/f410c456c1864c75a79630c06e37b155 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/f410c456c1864c75a79630c06e37b155 2024-12-12T19:35:10,045 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/542f862409914b109b56a0e49c0813cc to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/542f862409914b109b56a0e49c0813cc 2024-12-12T19:35:10,045 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/500cf24034604fa9ad968f642f47ac9d to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/500cf24034604fa9ad968f642f47ac9d 2024-12-12T19:35:10,045 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/065ae5e8b62142af999bc03c1d35b7f5 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/065ae5e8b62142af999bc03c1d35b7f5 2024-12-12T19:35:10,046 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/e6f738f6d1c34fa38ca2df27b68a692b to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/e6f738f6d1c34fa38ca2df27b68a692b 2024-12-12T19:35:10,046 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/15021bee9ead454591e3bc7d0e44c057 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/15021bee9ead454591e3bc7d0e44c057 2024-12-12T19:35:10,049 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/93a35425e3ef41fbaf5dada52fe4d8c3 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/93a35425e3ef41fbaf5dada52fe4d8c3 2024-12-12T19:35:10,049 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7527a602019f41b695160f0b80f1abe6 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7527a602019f41b695160f0b80f1abe6 2024-12-12T19:35:10,049 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/3b65ee521e684c5caf909f1a4768384d to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/3b65ee521e684c5caf909f1a4768384d 2024-12-12T19:35:10,049 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/097bcb47c8194fefb435ba4de2b56846 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/097bcb47c8194fefb435ba4de2b56846 2024-12-12T19:35:10,049 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/686c4fa008ab43849f238bf4b6bcf305 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/686c4fa008ab43849f238bf4b6bcf305 2024-12-12T19:35:10,050 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/b5b3a1ca6c6d48e4a787a2770e7d8406 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/b5b3a1ca6c6d48e4a787a2770e7d8406 2024-12-12T19:35:10,051 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ae6ab58c5f034c04b54f1bd62c32528a to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ae6ab58c5f034c04b54f1bd62c32528a 2024-12-12T19:35:10,051 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/38876bb80fc74038a38e09599fa29ce7 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/38876bb80fc74038a38e09599fa29ce7 2024-12-12T19:35:10,052 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/c7802ade8fc544ea9ee7d1002a4a3e12 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/c7802ade8fc544ea9ee7d1002a4a3e12 2024-12-12T19:35:10,053 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7d233d03154149de8eb3131a83ec0faa to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/7d233d03154149de8eb3131a83ec0faa 2024-12-12T19:35:10,053 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ba79f22132324c74bf6d24f6756572c8 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ba79f22132324c74bf6d24f6756572c8 2024-12-12T19:35:10,053 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/2b1a4cabaf164fb6ab7d5398bc4bcfa4 2024-12-12T19:35:10,059 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/09d2efe0000343978f74adba5f75f567 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/09d2efe0000343978f74adba5f75f567 2024-12-12T19:35:10,095 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/recovered.edits/327.seqid, newMaxSeqId=327, maxSeqId=4 2024-12-12T19:35:10,100 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f. 
2024-12-12T19:35:10,100 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1635): Region close journal for 9e48e6f12fe9b1293921e76b13fdbb7f: 2024-12-12T19:35:10,114 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(170): Closed 9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,115 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=9e48e6f12fe9b1293921e76b13fdbb7f, regionState=CLOSED 2024-12-12T19:35:10,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-12T19:35:10,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseRegionProcedure 9e48e6f12fe9b1293921e76b13fdbb7f, server=4c9c438b6eeb,42689,1734031923038 in 1.4780 sec 2024-12-12T19:35:10,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-12T19:35:10,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9e48e6f12fe9b1293921e76b13fdbb7f, UNASSIGN in 1.4840 sec 2024-12-12T19:35:10,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-12T19:35:10,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4860 sec 2024-12-12T19:35:10,128 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734032110128"}]},"ts":"1734032110128"} 2024-12-12T19:35:10,129 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T19:35:10,143 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T19:35:10,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5450 sec 2024-12-12T19:35:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T19:35:10,706 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-12T19:35:10,706 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T19:35:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:10,708 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T19:35:10,709 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:10,710 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,712 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C, FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/recovered.edits] 2024-12-12T19:35:10,715 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/8a2b9321b58d4f7a815c7b82a089e497 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/8a2b9321b58d4f7a815c7b82a089e497 2024-12-12T19:35:10,715 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/39329e3dc5434b8889e82ed6d7fec875 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/A/39329e3dc5434b8889e82ed6d7fec875 2024-12-12T19:35:10,717 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/22545d56780741769023996cb5ddc3ef to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/22545d56780741769023996cb5ddc3ef 2024-12-12T19:35:10,717 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/53fd867ae70642348796e8a16bebf3b2 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/B/53fd867ae70642348796e8a16bebf3b2 2024-12-12T19:35:10,720 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ff6d9065a7864966ba84bae175f76236 to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/ff6d9065a7864966ba84bae175f76236 
2024-12-12T19:35:10,720 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d02595ec26d2451185d3ad006f8033ee to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/C/d02595ec26d2451185d3ad006f8033ee 2024-12-12T19:35:10,723 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/recovered.edits/327.seqid to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f/recovered.edits/327.seqid 2024-12-12T19:35:10,724 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/default/TestAcidGuarantees/9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,724 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T19:35:10,725 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T19:35:10,726 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T19:35:10,732 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121226be882e61fd4e51b71640f33dea7ae6_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121226be882e61fd4e51b71640f33dea7ae6_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,732 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121211f7fbdedc1f47b284d1156609b8310a_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121211f7fbdedc1f47b284d1156609b8310a_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,732 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120fbea9a1c51c40dfba64a48141c4ba9b_9e48e6f12fe9b1293921e76b13fdbb7f to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120fbea9a1c51c40dfba64a48141c4ba9b_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,733 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121213176b6c40c9427fa346f2dd7aaff344_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121213176b6c40c9427fa346f2dd7aaff344_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,733 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412122960061add0046778f8a922277e045df_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412122960061add0046778f8a922277e045df_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,733 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121229a3cab843754b4cacb455e52a19af7b_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121229a3cab843754b4cacb455e52a19af7b_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,733 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124d395bc5d86d48a8b497c2bd94009f29_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124d395bc5d86d48a8b497c2bd94009f29_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,733 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125743345fec7f403481b48e9018431bbe_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125743345fec7f403481b48e9018431bbe_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,734 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212744ce8edf17545e2b64b578bfdae8170_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212744ce8edf17545e2b64b578bfdae8170_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,734 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212707984fa84e24609aca1879060042e89_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212707984fa84e24609aca1879060042e89_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,734 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212abd8264b2579494884095af6f54e7769_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212abd8264b2579494884095af6f54e7769_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,734 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a645902245124d9ea0f7bf3e147bc812_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a645902245124d9ea0f7bf3e147bc812_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,735 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cb019164df1942ea90ea169d628781b6_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cb019164df1942ea90ea169d628781b6_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,735 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e6d82d50976149adbe0e4218f79bcfb0_9e48e6f12fe9b1293921e76b13fdbb7f to 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e6d82d50976149adbe0e4218f79bcfb0_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,735 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c25d34d95651458380d4d7a177db8ed1_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c25d34d95651458380d4d7a177db8ed1_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,735 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212eb2c2ec80cbf43b8b55baf070f40302e_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212eb2c2ec80cbf43b8b55baf070f40302e_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,739 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212fd1e6183d0d440b0863d29affe9cd464_9e48e6f12fe9b1293921e76b13fdbb7f to hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212fd1e6183d0d440b0863d29affe9cd464_9e48e6f12fe9b1293921e76b13fdbb7f 2024-12-12T19:35:10,741 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T19:35:10,744 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:10,750 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T19:35:10,755 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T19:35:10,767 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:10,767 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-12T19:35:10,768 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734032110767"}]},"ts":"9223372036854775807"} 2024-12-12T19:35:10,777 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T19:35:10,777 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9e48e6f12fe9b1293921e76b13fdbb7f, NAME => 'TestAcidGuarantees,,1734032082945.9e48e6f12fe9b1293921e76b13fdbb7f.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T19:35:10,778 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T19:35:10,778 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734032110778"}]},"ts":"9223372036854775807"} 2024-12-12T19:35:10,780 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T19:35:10,801 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T19:35:10,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 95 msec 2024-12-12T19:35:10,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40199 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T19:35:10,809 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-12T19:35:10,822 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=245 (was 244) - Thread LEAK? -, OpenFileDescriptor=461 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1531 (was 1501) - SystemLoadAverage LEAK? 
-, ProcessCount=9 (was 9), AvailableMemoryMB=9437 (was 9762) 2024-12-12T19:35:10,822 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-12T19:35:10,822 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T19:35:10,822 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:52216 2024-12-12T19:35:10,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:10,822 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T19:35:10,822 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1390920325, stopped=false 2024-12-12T19:35:10,822 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=4c9c438b6eeb,40199,1734031921750 2024-12-12T19:35:10,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T19:35:10,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T19:35:10,826 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-12T19:35:10,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:35:10,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T19:35:10,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T19:35:10,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T19:35:10,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:10,827 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '4c9c438b6eeb,42689,1734031923038' ***** 2024-12-12T19:35:10,827 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T19:35:10,827 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(3579): Received CLOSE for 2c541955553f42ed357f6055374132eb 2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1224): stopping server 4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:10,828 DEBUG [RS:0;4c9c438b6eeb:42689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T19:35:10,828 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T19:35:10,829 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-12T19:35:10,829 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-12T19:35:10,829 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T19:35:10,829 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1603): Online Regions={2c541955553f42ed357f6055374132eb=hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb., 1588230740=hbase:meta,,1.1588230740} 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2c541955553f42ed357f6055374132eb, disabling compactions & flushes 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T19:35:10,829 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:35:10,829 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. after waiting 0 ms 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 
2024-12-12T19:35:10,829 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T19:35:10,829 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2c541955553f42ed357f6055374132eb 2024-12-12T19:35:10,829 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 2c541955553f42ed357f6055374132eb 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-12T19:35:10,829 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-12T19:35:10,854 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/.tmp/info/890bad8d60f04581854146201a34731b is 45, key is default/info:d/1734031930190/Put/seqid=0 2024-12-12T19:35:10,865 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/info/5e212517dfed4b75b27c4eb164a5585f is 143, key is hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb./info:regioninfo/1734031929591/Put/seqid=0 2024-12-12T19:35:10,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742452_1628 (size=5037) 2024-12-12T19:35:10,872 INFO [regionserver/4c9c438b6eeb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T19:35:10,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742453_1629 (size=7725) 2024-12-12T19:35:10,885 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/info/5e212517dfed4b75b27c4eb164a5585f 2024-12-12T19:35:10,912 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/rep_barrier/451ebc160b654806ac2df00d2da92129 is 102, key is TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5./rep_barrier:/1734031959709/DeleteFamily/seqid=0 2024-12-12T19:35:10,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742454_1630 (size=6025) 2024-12-12T19:35:11,030 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2c541955553f42ed357f6055374132eb 2024-12-12T19:35:11,073 INFO [regionserver/4c9c438b6eeb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-12T19:35:11,073 INFO [regionserver/4c9c438b6eeb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-12T19:35:11,230 DEBUG [RS:0;4c9c438b6eeb:42689 
{}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2c541955553f42ed357f6055374132eb 2024-12-12T19:35:11,264 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/.tmp/info/890bad8d60f04581854146201a34731b 2024-12-12T19:35:11,267 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/.tmp/info/890bad8d60f04581854146201a34731b as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/info/890bad8d60f04581854146201a34731b 2024-12-12T19:35:11,271 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/info/890bad8d60f04581854146201a34731b, entries=2, sequenceid=6, filesize=4.9 K 2024-12-12T19:35:11,272 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 2c541955553f42ed357f6055374132eb in 443ms, sequenceid=6, compaction requested=false 2024-12-12T19:35:11,276 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/namespace/2c541955553f42ed357f6055374132eb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T19:35:11,278 INFO [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 2024-12-12T19:35:11,278 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2c541955553f42ed357f6055374132eb: 2024-12-12T19:35:11,278 DEBUG [RS_CLOSE_REGION-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734031928599.2c541955553f42ed357f6055374132eb. 
2024-12-12T19:35:11,348 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/rep_barrier/451ebc160b654806ac2df00d2da92129 2024-12-12T19:35:11,377 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/table/83f82a1033a842af99b3401e399c21f9 is 96, key is TestAcidGuarantees,,1734031930719.6ffb87fb734b5d4ed7499f1da86f79f5./table:/1734031959709/DeleteFamily/seqid=0 2024-12-12T19:35:11,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742455_1631 (size=5942) 2024-12-12T19:35:11,430 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T19:35:11,631 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T19:35:11,795 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/table/83f82a1033a842af99b3401e399c21f9 2024-12-12T19:35:11,825 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/info/5e212517dfed4b75b27c4eb164a5585f as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/info/5e212517dfed4b75b27c4eb164a5585f 2024-12-12T19:35:11,828 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/info/5e212517dfed4b75b27c4eb164a5585f, entries=22, sequenceid=93, filesize=7.5 K 2024-12-12T19:35:11,829 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/rep_barrier/451ebc160b654806ac2df00d2da92129 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/rep_barrier/451ebc160b654806ac2df00d2da92129 2024-12-12T19:35:11,831 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-12T19:35:11,831 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-12T19:35:11,831 DEBUG [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T19:35:11,833 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/rep_barrier/451ebc160b654806ac2df00d2da92129, entries=6, sequenceid=93, filesize=5.9 K 2024-12-12T19:35:11,834 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/.tmp/table/83f82a1033a842af99b3401e399c21f9 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/table/83f82a1033a842af99b3401e399c21f9 2024-12-12T19:35:11,837 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/table/83f82a1033a842af99b3401e399c21f9, entries=9, sequenceid=93, filesize=5.8 K 2024-12-12T19:35:11,838 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1008ms, sequenceid=93, compaction requested=false 2024-12-12T19:35:11,892 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-12T19:35:11,892 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T19:35:11,893 INFO [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T19:35:11,893 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T19:35:11,893 DEBUG [RS_CLOSE_META-regionserver/4c9c438b6eeb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T19:35:12,031 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1250): stopping server 4c9c438b6eeb,42689,1734031923038; all regions closed. 
2024-12-12T19:35:12,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741834_1010 (size=26050) 2024-12-12T19:35:12,038 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038/4c9c438b6eeb%2C42689%2C1734031923038.meta.1734031927983.meta not finished, retry = 0 2024-12-12T19:35:12,142 DEBUG [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/oldWALs 2024-12-12T19:35:12,142 INFO [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 4c9c438b6eeb%2C42689%2C1734031923038.meta:.meta(num 1734031927983) 2024-12-12T19:35:12,153 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/WALs/4c9c438b6eeb,42689,1734031923038/4c9c438b6eeb%2C42689%2C1734031923038.1734031927365 not finished, retry = 0 2024-12-12T19:35:12,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741833_1009 (size=14027968) 2024-12-12T19:35:12,255 DEBUG [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/oldWALs 2024-12-12T19:35:12,255 INFO [RS:0;4c9c438b6eeb:42689 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 4c9c438b6eeb%2C42689%2C1734031923038:(num 1734031927365) 2024-12-12T19:35:12,255 DEBUG [RS:0;4c9c438b6eeb:42689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:12,255 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T19:35:12,255 INFO [RS:0;4c9c438b6eeb:42689 {}] hbase.ChoreService(370): Chore service for: regionserver/4c9c438b6eeb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-12T19:35:12,255 INFO [regionserver/4c9c438b6eeb:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-12T19:35:12,256 INFO [RS:0;4c9c438b6eeb:42689 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42689 2024-12-12T19:35:12,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T19:35:12,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4c9c438b6eeb,42689,1734031923038 2024-12-12T19:35:12,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4c9c438b6eeb,42689,1734031923038] 2024-12-12T19:35:12,294 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 4c9c438b6eeb,42689,1734031923038; numProcessing=1 2024-12-12T19:35:12,301 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/4c9c438b6eeb,42689,1734031923038 already deleted, retry=false 2024-12-12T19:35:12,301 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 4c9c438b6eeb,42689,1734031923038 expired; onlineServers=0 2024-12-12T19:35:12,301 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '4c9c438b6eeb,40199,1734031921750' ***** 2024-12-12T19:35:12,301 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T19:35:12,302 DEBUG [M:0;4c9c438b6eeb:40199 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b2972ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4c9c438b6eeb/172.17.0.2:0 2024-12-12T19:35:12,302 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegionServer(1224): stopping server 4c9c438b6eeb,40199,1734031921750 2024-12-12T19:35:12,302 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegionServer(1250): stopping server 4c9c438b6eeb,40199,1734031921750; all regions closed. 2024-12-12T19:35:12,302 DEBUG [M:0;4c9c438b6eeb:40199 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T19:35:12,302 DEBUG [M:0;4c9c438b6eeb:40199 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T19:35:12,302 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-12T19:35:12,302 DEBUG [M:0;4c9c438b6eeb:40199 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-12T19:35:12,302 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster-HFileCleaner.large.0-1734031926719 {}] cleaner.HFileCleaner(306): Exit Thread[master/4c9c438b6eeb:0:becomeActiveMaster-HFileCleaner.large.0-1734031926719,5,FailOnTimeoutGroup]
2024-12-12T19:35:12,302 DEBUG [master/4c9c438b6eeb:0:becomeActiveMaster-HFileCleaner.small.0-1734031926721 {}] cleaner.HFileCleaner(306): Exit Thread[master/4c9c438b6eeb:0:becomeActiveMaster-HFileCleaner.small.0-1734031926721,5,FailOnTimeoutGroup]
2024-12-12T19:35:12,302 INFO [M:0;4c9c438b6eeb:40199 {}] hbase.ChoreService(370): Chore service for: master/4c9c438b6eeb:0 had [] on shutdown
2024-12-12T19:35:12,302 DEBUG [M:0;4c9c438b6eeb:40199 {}] master.HMaster(1733): Stopping service threads
2024-12-12T19:35:12,302 INFO [M:0;4c9c438b6eeb:40199 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-12T19:35:12,303 INFO [M:0;4c9c438b6eeb:40199 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-12T19:35:12,303 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-12T19:35:12,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-12T19:35:12,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T19:35:12,310 DEBUG [M:0;4c9c438b6eeb:40199 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/master already deleted, retry=false
2024-12-12T19:35:12,310 DEBUG [M:0;4c9c438b6eeb:40199 {}] master.ActiveMasterManager(353): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-12T19:35:12,310 INFO [M:0;4c9c438b6eeb:40199 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-12T19:35:12,310 INFO [M:0;4c9c438b6eeb:40199 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-12T19:35:12,310 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-12T19:35:12,310 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T19:35:12,310 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T19:35:12,310 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T19:35:12,310 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-12T19:35:12,310 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T19:35:12,310 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=788.54 KB heapSize=971.67 KB
2024-12-12T19:35:12,331 DEBUG [M:0;4c9c438b6eeb:40199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/245cce0f8de148f9b222b505161534fa is 82, key is hbase:meta,,1/info:regioninfo/1734031928305/Put/seqid=0
2024-12-12T19:35:12,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742456_1632 (size=5672)
2024-12-12T19:35:12,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T19:35:12,401 INFO [RS:0;4c9c438b6eeb:42689 {}] regionserver.HRegionServer(1307): Exiting; stopping=4c9c438b6eeb,42689,1734031923038; zookeeper connection closed.
2024-12-12T19:35:12,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42689-0x1001bba6bd70001, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T19:35:12,402 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@fb2460e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@fb2460e
2024-12-12T19:35:12,403 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-12T19:35:12,558 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-12T19:35:12,559 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-12T19:35:12,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-12T19:35:12,561 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-12-12T19:35:12,737 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2268 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/245cce0f8de148f9b222b505161534fa
2024-12-12T19:35:12,766 DEBUG [M:0;4c9c438b6eeb:40199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3f49536f3fa14adc87ea01e7ee694394 is 2284, key is \x00\x00\x00\x00\x00\x00\x00"/proc:d/1734031963504/Put/seqid=0
2024-12-12T19:35:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742457_1633 (size=43723)
2024-12-12T19:35:12,793 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=787.98 KB at sequenceid=2268 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3f49536f3fa14adc87ea01e7ee694394
2024-12-12T19:35:12,804 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3f49536f3fa14adc87ea01e7ee694394
2024-12-12T19:35:12,850 DEBUG [M:0;4c9c438b6eeb:40199 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0dd6d08e0d684f6189705436ac95742d is 69, key is 4c9c438b6eeb,42689,1734031923038/rs:state/1734031926792/Put/seqid=0
2024-12-12T19:35:12,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073742458_1634 (size=5156)
2024-12-12T19:35:12,923 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2268 (bloomFilter=true), to=hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0dd6d08e0d684f6189705436ac95742d
2024-12-12T19:35:12,958 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/245cce0f8de148f9b222b505161534fa as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/245cce0f8de148f9b222b505161534fa
2024-12-12T19:35:12,993 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/245cce0f8de148f9b222b505161534fa, entries=8, sequenceid=2268, filesize=5.5 K
2024-12-12T19:35:12,997 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3f49536f3fa14adc87ea01e7ee694394 as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3f49536f3fa14adc87ea01e7ee694394
2024-12-12T19:35:13,012 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3f49536f3fa14adc87ea01e7ee694394
2024-12-12T19:35:13,012 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3f49536f3fa14adc87ea01e7ee694394, entries=171, sequenceid=2268, filesize=42.7 K
2024-12-12T19:35:13,013 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0dd6d08e0d684f6189705436ac95742d as hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0dd6d08e0d684f6189705436ac95742d
2024-12-12T19:35:13,019 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38311/user/jenkins/test-data/482833ff-e16a-4c62-c863-a898fd9a1b98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0dd6d08e0d684f6189705436ac95742d, entries=1, sequenceid=2268, filesize=5.0 K
2024-12-12T19:35:13,023 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(3040): Finished flush of dataSize ~788.54 KB/807463, heapSize ~971.38 KB/994688, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 713ms, sequenceid=2268, compaction requested=false
2024-12-12T19:35:13,047 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T19:35:13,047 DEBUG [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-12T19:35:13,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741830_1006 (size=956090)
2024-12-12T19:35:13,071 INFO [M:0;4c9c438b6eeb:40199 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-12T19:35:13,071 INFO [M:0;4c9c438b6eeb:40199 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40199
2024-12-12T19:35:13,072 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-12T19:35:13,084 DEBUG [M:0;4c9c438b6eeb:40199 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/4c9c438b6eeb,40199,1734031921750 already deleted, retry=false
2024-12-12T19:35:13,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T19:35:13,193 INFO [M:0;4c9c438b6eeb:40199 {}] regionserver.HRegionServer(1307): Exiting; stopping=4c9c438b6eeb,40199,1734031921750; zookeeper connection closed.
2024-12-12T19:35:13,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40199-0x1001bba6bd70000, quorum=127.0.0.1:52216, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T19:35:13,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T19:35:13,231 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T19:35:13,231 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T19:35:13,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T19:35:13,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/hadoop.log.dir/,STOPPED}
2024-12-12T19:35:13,254 WARN [BP-554092088-172.17.0.2-1734031916410 heartbeating to localhost/127.0.0.1:38311 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-12T19:35:13,254 WARN [BP-554092088-172.17.0.2-1734031916410 heartbeating to localhost/127.0.0.1:38311 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-554092088-172.17.0.2-1734031916410 (Datanode Uuid e4e4933b-84ad-40c1-bcb6-aee34af92fef) service to localhost/127.0.0.1:38311
2024-12-12T19:35:13,255 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-12T19:35:13,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-12T19:35:13,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/dfs/data/data1/current/BP-554092088-172.17.0.2-1734031916410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T19:35:13,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/cluster_c86be23e-f44a-87b2-e345-8f5fe86e6095/dfs/data/data2/current/BP-554092088-172.17.0.2-1734031916410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T19:35:13,258 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-12T19:35:13,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T19:35:13,318 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T19:35:13,318 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T19:35:13,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T19:35:13,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/854c470c-5416-afb5-782c-ddfe00b13463/hadoop.log.dir/,STOPPED}
2024-12-12T19:35:13,341 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-12T19:35:13,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down